Merge pull request #54 from status-im/feature/oct-19-rebase

Oct 19 rebase
Victor Farazdagi 2016-10-21 12:21:47 +03:00 committed by GitHub
commit 8e6546f51a
88 changed files with 6622 additions and 466 deletions


@ -1,7 +1,5 @@
[
"enode://e15869ba08a25e49be7568b951e15af5d77a472c8e4104a14a4951f99936d65f91240d5b5f23674aee44f1ac09d8adfc6a9bff75cd8c2df73a26442f313f2da4@162.243.63.248:30303",
"enode://efe4e6899e05237180c0970aedb81cb5aecf5b200779c7c9e1f955783e8299b364c0b981c03f4c36ad5328ef972b417afde260bbf2c5a8db37ba7f5738033952@198.199.105.122:30303",
"enode://5a5839435f48d1e3f2f907e4582f0a134e0b7857afe507073978ca32cf09ea54989dac433605047d0bc4cd19a8c80affac6876069014283aa7c7bb4954d0e623@95.85.40.211:30303",
"enode://2f05d430b4cb1c0e2a0772d48da3a034f1b596ea7163ab80d3404802d10b7d55bde323897c2be0d36026181e1a68510ea1f42a646ef9494c27e61f61e4088b7d@188.166.229.119:30303",
"enode://ad61a21f83f12b0ca494611650f5e4b6427784e7c62514dcb729a3d65106de6f12836813acf39bdc35c12ecfd0e230723678109fd4e7091ce389697bd7da39b4@139.59.212.114:30303"
"enode://fc3065bb80bfced98a01441718e2b70a0353f023b9da3d57beb8f96a827402d23702b3a461e1c1b6c7a208cb09cc0aea9b7c42bf953bb8f732529c198b158db4@95.85.40.211:30303",
"enode://5ffa3a39f95614d881e07d24e265865218c45fe73b3a5f5d05868190e385cbf60d03ac8beaa4c31b7ee84a0ec947f22c969e2dd1783041a4d7381f7774c74526@188.166.229.119:30303",
"enode://3b020a1fd6ab980a5670975e8a7361af1732fa3fa1819b751a94b6a4265e8c52b02c608c0de1347784b834b298280b018bcf6547f47bbba63612cba0e4707ec1@139.59.212.114:30303"
]


@ -61,15 +61,15 @@ type SelectedExtKey struct {
}
type NodeManager struct {
currentNode *node.Node // currently running geth node
ctx *cli.Context // the CLI context used to start the geth node
lightEthereum *les.LightEthereum // LES service
accountManager *accounts.Manager // the account manager attached to the currentNode
jailedRequestQueue *JailedRequestQueue // bridge via which jail notifies node of incoming requests
SelectedAccount *SelectedExtKey // account that was processed during the last call to SelectAccount()
whisperService *whisper.Whisper // Whisper service
client *rpc.ClientRestartWrapper // RPC client
nodeStarted chan struct{} // channel to wait for node to start
currentNode *node.Node // currently running geth node
ctx *cli.Context // the CLI context used to start the geth node
lightEthereum *les.LightEthereum // LES service
accountManager *accounts.Manager // the account manager attached to the currentNode
jailedRequestQueue *JailedRequestQueue // bridge via which jail notifies node of incoming requests
SelectedAccount *SelectedExtKey // account that was processed during the last call to SelectAccount()
whisperService *whisper.Whisper // Whisper service
client *rpc.Client // RPC client
nodeStarted chan struct{} // channel to wait for node to start
}
var (
@ -166,13 +166,11 @@ func (m *NodeManager) RunNode() {
m.lightEthereum.StatusBackend.SetTransactionQueueHandler(onSendTransactionRequest)
m.lightEthereum.StatusBackend.SetAccountsFilterHandler(onAccountsListRequest)
m.client = rpc.NewClientRestartWrapper(func() *rpc.Client {
client, err := m.currentNode.Attach()
if err != nil {
return nil
}
return client
})
var err error
m.client, err = m.currentNode.Attach()
if err != nil {
glog.V(logger.Warn).Infoln("cannot get RPC client service:", ErrInvalidClient)
}
// @TODO Remove after LES supports discover out of the box
m.populateStaticPeers()
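For context, a minimal sketch (not part of the diff) of the pattern adopted here, assuming go-ethereum's node.Node.Attach and rpc.Client.Call as used above: the wrapper-and-factory indirection is replaced by a single attach call.

package sketch

import (
	"fmt"

	"github.com/ethereum/go-ethereum/node"
)

// attachClient obtains an in-process *rpc.Client straight from a running node,
// replacing the old ClientRestartWrapper factory indirection.
func attachClient(stack *node.Node) error {
	client, err := stack.Attach()
	if err != nil {
		return fmt.Errorf("cannot get RPC client: %v", err)
	}
	var version string
	if err := client.Call(&version, "web3_clientVersion"); err != nil {
		return err
	}
	fmt.Println("attached to", version)
	return nil
}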
@ -268,16 +266,16 @@ func (m *NodeManager) LightEthereumService() (*les.LightEthereum, error) {
return m.lightEthereum, nil
}
func (m *NodeManager) HasClientRestartWrapper() bool {
func (m *NodeManager) HasRPCClient() bool {
return m.client != nil
}
func (m *NodeManager) ClientRestartWrapper() (*rpc.ClientRestartWrapper, error) {
func (m *NodeManager) RPCClient() (*rpc.Client, error) {
if m == nil || !m.HasNode() {
return nil, ErrInvalidGethNode
}
if !m.HasClientRestartWrapper() {
if !m.HasRPCClient() {
return nil, ErrInvalidClient
}


@ -23,7 +23,7 @@ var muPrepareTestNode sync.Mutex
const (
TestDataDir = "../.ethereumtest"
TestNodeSyncSeconds = 480
TestNodeSyncSeconds = 60
)
type NodeNotificationHandler func(jsonEvent string)
@ -119,7 +119,7 @@ func PrepareTestNode() (err error) {
if !manager.HasNode() {
panic(ErrInvalidGethNode)
}
if !manager.HasClientRestartWrapper() {
if !manager.HasRPCClient() {
panic(ErrInvalidGethNode)
}
if !manager.HasWhisperService() {


@ -25,7 +25,7 @@ var (
type Jail struct {
sync.RWMutex
client *rpc.ClientRestartWrapper // lazy inited on the first call to jail.ClientRestartWrapper()
client *rpc.Client // lazy inited on the first call
cells map[string]*JailedRuntime // jail supports running many isolated instances of jailed runtime
statusJS string
requestQueue *geth.JailedRequestQueue
@ -108,7 +108,7 @@ func (jail *Jail) Parse(chatId string, js string) string {
}
func (jail *Jail) Call(chatId string, path string, args string) string {
_, err := jail.ClientRestartWrapper()
_, err := jail.RPCClient()
if err != nil {
return printError(err.Error())
}
@ -145,7 +145,7 @@ func (jail *Jail) GetVM(chatId string) (*otto.Otto, error) {
// Send will serialize the first argument, send it to the node and return the response.
func (jail *Jail) Send(chatId string, call otto.FunctionCall) (response otto.Value) {
clientFactory, err := jail.ClientRestartWrapper()
client, err := jail.RPCClient()
if err != nil {
return newErrorResponse(call, -32603, err.Error(), nil)
}
@ -201,7 +201,6 @@ func (jail *Jail) Send(chatId string, call otto.FunctionCall) (response otto.Val
return newErrorResponse(call, -32603, err.Error(), nil)
}
client := clientFactory.Client()
errc := make(chan error, 1)
errc2 := make(chan error)
go func() {
@ -252,7 +251,7 @@ func (jail *Jail) Send(chatId string, call otto.FunctionCall) (response otto.Val
return response
}
func (jail *Jail) ClientRestartWrapper() (*rpc.ClientRestartWrapper, error) {
func (jail *Jail) RPCClient() (*rpc.Client, error) {
if jail == nil {
return nil, ErrInvalidJail
}
@ -267,7 +266,7 @@ func (jail *Jail) ClientRestartWrapper() (*rpc.ClientRestartWrapper, error) {
}
// obtain RPC client from running node
client, err := nodeManager.ClientRestartWrapper()
client, err := nodeManager.RPCClient()
if err != nil {
return nil, err
}


@ -47,7 +47,7 @@ func TestJailUnInited(t *testing.T) {
t.Errorf("error expected, but got: %v", err)
}
_, err = jailInstance.ClientRestartWrapper()
_, err = jailInstance.RPCClient()
if err != jail.ErrInvalidJail {
t.Errorf("error expected, but got: %v", err)
}


@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/nat"
)
@ -38,6 +39,8 @@ func main() {
nodeKeyFile = flag.String("nodekey", "", "private key filename")
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
v5test = flag.Bool("v5test", false, "run a v5 topic discovery test node (adds default bootnodes to form a test network)")
nodeKey *ecdsa.PrivateKey
err error
@ -79,8 +82,21 @@ func main() {
os.Exit(0)
}
if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, ""); err != nil {
utils.Fatalf("%v", err)
if *runv5 || *v5test {
if ntab, err := discv5.ListenUDP(nodeKey, *listenAddr, natm, ""); err != nil {
utils.Fatalf("%v", err)
} else {
if *v5test {
if err := ntab.SetFallbackNodes(discv5.BootNodes); err != nil {
utils.Fatalf("%v", err)
}
}
}
} else {
if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, ""); err != nil {
utils.Fatalf("%v", err)
}
}
select {}
}
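With the flags above, a v5 topic-discovery bootnode could hypothetically be started as bootnode -nodekeyhex <hex> -v5 (or with -v5test to join the test network using the default bootnodes); without either flag the process falls through to the old discover.ListenUDP path.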


@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
"gopkg.in/urfave/cli.v1"
)
@ -226,6 +227,9 @@ func NewEnv(state *state.StateDB, transactor common.Address, value *big.Int, cfg
type ruleSet struct{}
func (ruleSet) IsHomestead(*big.Int) bool { return true }
func (ruleSet) GasTable(*big.Int) params.GasTable {
return params.GasTableHomesteadGasRepriceFork
}
func (self *VMEnv) RuleSet() vm.RuleSet { return ruleSet{} }
func (self *VMEnv) Vm() vm.Vm { return self.evm }


@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/syndtr/goleveldb/leveldb/util"
"gopkg.in/urfave/cli.v1"
)
@ -81,13 +82,29 @@ func importChain(ctx *cli.Context) error {
}
stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack)
defer chainDb.Close()
// Import the chain
start := time.Now()
err := utils.ImportChain(chain, ctx.Args().First())
chainDb.Close()
if err != nil {
if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
utils.Fatalf("Import error: %v", err)
}
fmt.Printf("Import done in %v", time.Since(start))
fmt.Printf("Import done in %v, compacting...\n", time.Since(start))
// Compact the entire database to more accurately measure disk io and print the stats
if db, ok := chainDb.(*ethdb.LDBDatabase); ok {
start = time.Now()
if err := db.LDB().CompactRange(util.Range{}); err != nil {
utils.Fatalf("Compaction failed: %v", err)
}
fmt.Printf("Compaction done in %v.\n", time.Since(start))
stats, err := db.LDB().GetProperty("leveldb.stats")
if err != nil {
utils.Fatalf("Failed to read database stats: %v", err)
}
fmt.Println(stats)
}
return nil
}


@ -70,13 +70,10 @@ func localConsole(ctx *cli.Context) error {
defer node.Stop()
// Attach to the newly started node and start the JavaScript console
client := rpc.NewClientRestartWrapper(func() *rpc.Client {
client, err := node.Attach()
if err != nil {
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
}
return client
})
client, err := node.Attach()
if err != nil {
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
}
config := console.Config{
DataDir: node.DataDir(),
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
@ -105,14 +102,10 @@ func localConsole(ctx *cli.Context) error {
// console to it.
func remoteConsole(ctx *cli.Context) error {
// Attach to a remotely running geth instance and start the JavaScript console
client := rpc.NewClientRestartWrapper(func() *rpc.Client {
client, err := dialRPC(ctx.Args().First())
if err != nil {
utils.Fatalf("Unable to attach to remote geth: %v", err)
}
return client
})
client, err := dialRPC(ctx.Args().First())
if err != nil {
utils.Fatalf("Unable to attach to remote geth: %v", err)
}
config := console.Config{
DataDir: utils.MakeDataDir(ctx),
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
@ -161,14 +154,10 @@ func ephemeralConsole(ctx *cli.Context) error {
defer node.Stop()
// Attach to the newly started node and start the JavaScript console
client := rpc.NewClientRestartWrapper(func() *rpc.Client {
client, err := node.Attach()
if err != nil {
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
}
return client
})
client, err := node.Attach()
if err != nil {
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
}
config := console.Config{
DataDir: node.DataDir(),
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),


@ -143,6 +143,7 @@ participating.
utils.LightKDFFlag,
utils.JSpathFlag,
utils.ListenPortFlag,
utils.ListenPortV5Flag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
utils.EtherbaseFlag,
@ -157,6 +158,7 @@ participating.
utils.NATFlag,
utils.NatspecEnabledFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV5Flag,
utils.NoEthFlag,
utils.NodeKeyFileFlag,
utils.NodeKeyHexFlag,
@ -313,16 +315,14 @@ func startNode(ctx *cli.Context, stack *node.Node) {
if ctx.GlobalBool(utils.TestNetFlag.Name) {
// TestNet (John Gerryts @phonikg)
addPeer("enode://7d00e8c27b2328e2008a9fc86e81afba22681fdac675b99805fa62cc29ee8a2a9d83f916f7661da6a6bd78155a430bb2bd7cec733ca9e700e236ec9c71d97e24@50.112.52.169:30301")
addPeer("enode://6807cacb2b43b39d19162254c189b8828c008bb6539d39d832e98d3c65aeb70e10ce2698772b07e704b21ce7a6a4407ad0e15951ebb63b452f878cd366a1c3f5@50.112.52.169:30301")
} else {
if ctx.GlobalBool(utils.OpposeDAOFork.Name) {
// Classic (Azure)
addPeer("enode://fc3d7b57e5d317946bf421411632ec98d5ffcbf94548cd7bc10088e4fef176670f8ec70280d301a9d0b22fe498203f62b323da15b3acc18b02a1fee2a06b7d3f@40.118.3.223:30305")
} else {
// MainNet (Azure)
addPeer("enode://feaf206a308a669a789be45f4dadcb351246051727f12415ad69e44f8080daf0569c10fe1d9944d245dd1f3e1c89cedda8ce03d7e3d5ed8975a35cad4b4f7ec1@40.118.3.223:30303")
addPeer("enode://97d280903aff3db6049b5d5f8a5fb2c7ea9228b4352eeaa0ee919772b20009a22d1801ec4365f25c60d2f2dc9c35c6017a1d5a654e027f066ee765be4ecc5019@40.118.3.223:30303")
// MainNet (John Gerryts @phonikg)
addPeer("enode://3cbd26f73513af0e789c55ea9efa6d259be2d5f6882bdb52740e21e01379287b652642a87207f1bc07c64aae3ab51ab566dede7588d6064022d40577fe59d5de@50.112.52.169:30300")
addPeer("enode://08cc6631556d7ef632de642c0bcbbb0f9dc457155ecf1b5b92ba28baff076cd6cbfdd9e0524584fde021c691a508f133c3d019d5caad502b39944fc6ba5ce02f@50.112.52.169:30300")
}
}
}


@ -113,10 +113,12 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.BootnodesFlag,
utils.ListenPortFlag,
utils.ListenPortV5Flag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
utils.NATFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV5Flag,
utils.NodeKeyFileFlag,
utils.NodeKeyHexFlag,
},


@ -164,12 +164,12 @@ var (
LightServFlag = cli.IntFlag{
Name: "lightserv",
Usage: "Maximum percentage of time allowed for serving LES requests (0-90)",
Value: 20,
Value: 80,
}
LightPeersFlag = cli.IntFlag{
Name: "lightpeers",
Usage: "Maximum number of LES client peers",
Value: 10,
Value: 20,
}
LightKDFFlag = cli.BoolFlag{
Name: "lightkdf",
@ -346,6 +346,11 @@ var (
Usage: "Network listening port",
Value: 30303,
}
ListenPortV5Flag = cli.IntFlag{
Name: "v5port",
Usage: "Experimental RLPx V5 (Topic Discovery) listening port",
Value: 30304,
}
BootnodesFlag = cli.StringFlag{
Name: "bootnodes",
Usage: "Comma separated enode URLs for P2P discovery bootstrap",
@ -368,6 +373,10 @@ var (
Name: "nodiscover",
Usage: "Disables the peer discovery mechanism (manual peer addition)",
}
DiscoveryV5Flag = cli.BoolFlag{
Name: "v5disc",
Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism",
}
NoEthFlag = cli.BoolFlag{
Name: "noeth",
Usage: "Disable Ethereum Protocol",
@ -522,6 +531,10 @@ func MakeListenAddress(ctx *cli.Context) string {
return fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name))
}
func MakeListenAddressV5(ctx *cli.Context) string {
return fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortV5Flag.Name))
}
// MakeNAT creates a port mapper from set command line flags.
func MakeNAT(ctx *cli.Context) nat.Interface {
natif, err := nat.Parse(ctx.GlobalString(NATFlag.Name))
@ -653,8 +666,10 @@ func MakeNode(ctx *cli.Context, name, gitCommit string) *node.Node {
Version: vsn,
UserIdent: makeNodeUserIdent(ctx),
NoDiscovery: ctx.GlobalBool(NoDiscoverFlag.Name),
DiscoveryV5: ctx.GlobalBool(DiscoveryV5Flag.Name),
BootstrapNodes: MakeBootstrapNodes(ctx),
ListenAddr: MakeListenAddress(ctx),
ListenAddrV5: MakeListenAddressV5(ctx),
NAT: MakeNAT(ctx),
MaxPeers: ctx.GlobalInt(MaxPeersFlag.Name),
MaxPendingPeers: ctx.GlobalInt(MaxPendingPeersFlag.Name),
@ -721,6 +736,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
NoDefSrv: ctx.GlobalBool(NoDefSrvFlag.Name),
LightServ: ctx.GlobalInt(LightServFlag.Name),
LightPeers: ctx.GlobalInt(LightPeersFlag.Name),
MaxPeers: ctx.GlobalInt(MaxPeersFlag.Name),
DatabaseCache: ctx.GlobalInt(CacheFlag.Name),
DatabaseHandles: MakeDatabaseHandles(),
NetworkId: ctx.GlobalInt(NetworkIdFlag.Name),
@ -774,7 +790,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
} else {
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
fullNode, err := eth.New(ctx, ethConf)
if fullNode != nil {
if fullNode != nil && ethConf.LightServ > 0 {
ls, _ := les.NewLesServer(fullNode, ethConf)
fullNode.AddLesServer(ls)
}
@ -848,6 +864,13 @@ func MakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *core.ChainConfi
}
config.DAOForkSupport = true
}
if config.HomesteadGasRepriceBlock == nil {
if ctx.GlobalBool(TestNetFlag.Name) {
config.HomesteadGasRepriceBlock = params.TestNetHomesteadGasRepriceBlock
} else {
config.HomesteadGasRepriceBlock = params.MainNetHomesteadGasRepriceBlock
}
}
// Force override any existing configs if explicitly requested
switch {
case ctx.GlobalBool(SupportDAOFork.Name):


@ -0,0 +1,40 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"fmt"
"regexp"
"strings"
"time"
)
// PrettyDuration is a pretty printed version of a time.Duration value that cuts
// the unnecessary precision off from the formatted textual representation.
type PrettyDuration time.Duration
var prettyDurationRe = regexp.MustCompile("\\.[0-9]+")
// String implements the Stringer interface, allowing pretty printing of duration
// values rounded to three decimals.
func (d PrettyDuration) String() string {
label := fmt.Sprintf("%v", time.Duration(d))
if match := prettyDurationRe.FindString(label); len(match) > 4 {
label = strings.Replace(label, match, match[:4], 1)
}
return label
}
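A small usage sketch of the new helper (assuming only the common package above); the regexp keeps the first three decimals of the default duration formatting.

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	d := 3*time.Second + 123456789*time.Nanosecond
	fmt.Println(time.Duration(d))         // 3.123456789s
	fmt.Println(common.PrettyDuration(d)) // 3.123s — precision cut to three decimals
}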


@ -26,20 +26,18 @@ import (
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rpc"
"github.com/robertkrimen/otto"
"golang.org/x/net/context"
)
// bridge is a collection of JavaScript utility methods to bridge the .js runtime
// environment and the Go RPC connection backing the remote method calls.
type bridge struct {
client *rpc.ClientRestartWrapper // RPC client to execute Ethereum requests through
client *rpc.Client // RPC client to execute Ethereum requests through
prompter UserPrompter // Input prompter to allow interactive user feedback
printer io.Writer // Output writer to serialize any display strings to
ctx context.Context
}
// newBridge creates a new JavaScript wrapper around an RPC client.
func newBridge(client *rpc.ClientRestartWrapper, prompter UserPrompter, printer io.Writer) *bridge {
func newBridge(client *rpc.Client, prompter UserPrompter, printer io.Writer) *bridge {
return &bridge{
client: client,
prompter: prompter,
@ -47,10 +45,6 @@ func newBridge(client *rpc.ClientRestartWrapper, prompter UserPrompter, printer
}
}
func (b *bridge) setContext(ctx context.Context) {
b.ctx = ctx
}
// NewAccount is a wrapper around the personal.newAccount RPC method that uses a
// non-echoing password prompt to acquire the passphrase and executes the original
// RPC method (saved in jeth.newAccount) with it to actually execute the RPC call.
@ -228,26 +222,7 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
resp, _ := call.Otto.Object(`({"jsonrpc":"2.0"})`)
resp.Set("id", req.Id)
var result json.RawMessage
client := b.client.Client()
errc := make(chan error, 1)
errc2 := make(chan error)
go func(){
if b.ctx != nil {
select {
case <-b.ctx.Done():
b.client.Restart()
errc2 <- b.ctx.Err()
case err := <-errc:
errc2 <- err
}
} else {
errc2 <- <-errc
}
}()
errc <- client.Call(&result, req.Method, req.Params...)
err = <-errc2
err = b.client.Call(&result, req.Method, req.Params...)
switch err := err.(type) {
case nil:
if result == nil {


@ -26,7 +26,6 @@ import (
"regexp"
"sort"
"strings"
"time"
"github.com/ethereum/go-ethereum/internal/jsre"
"github.com/ethereum/go-ethereum/internal/web3ext"
@ -34,7 +33,6 @@ import (
"github.com/mattn/go-colorable"
"github.com/peterh/liner"
"github.com/robertkrimen/otto"
"golang.org/x/net/context"
)
var (
@ -52,27 +50,26 @@ const DefaultPrompt = "> "
// Config is the collection of configurations to fine tune the behavior of the
// JavaScript console.
type Config struct {
DataDir string // Data directory to store the console history at
DocRoot string // Filesystem path from where to load JavaScript files from
Client *rpc.ClientRestartWrapper // RPC client to execute Ethereum requests through
Prompt string // Input prompt prefix string (defaults to DefaultPrompt)
Prompter UserPrompter // Input prompter to allow interactive user feedback (defaults to TerminalPrompter)
Printer io.Writer // Output writer to serialize any display strings to (defaults to os.Stdout)
Preload []string // Absolute paths to JavaScript files to preload
DataDir string // Data directory to store the console history at
DocRoot string // Filesystem path from where to load JavaScript files from
Client *rpc.Client // RPC client to execute Ethereum requests through
Prompt string // Input prompt prefix string (defaults to DefaultPrompt)
Prompter UserPrompter // Input prompter to allow interactive user feedback (defaults to TerminalPrompter)
Printer io.Writer // Output writer to serialize any display strings to (defaults to os.Stdout)
Preload []string // Absolute paths to JavaScript files to preload
}
// Console is a JavaScript interpreted runtime environment. It is a fully fledged
// JavaScript console attached to a running node via an external or in-process RPC
// client.
type Console struct {
client *rpc.ClientRestartWrapper // RPC client to execute Ethereum requests through
jsre *jsre.JSRE // JavaScript runtime environment running the interpreter
prompt string // Input prompt prefix string
prompter UserPrompter // Input prompter to allow interactive user feedback
histPath string // Absolute path to the console scrollback history
history []string // Scroll history maintained by the console
printer io.Writer // Output writer to serialize any display strings to
setContext func(context.Context)
client *rpc.Client // RPC client to execute Ethereum requests through
jsre *jsre.JSRE // JavaScript runtime environment running the interpreter
prompt string // Input prompt prefix string
prompter UserPrompter // Input prompter to allow interactive user feedback
histPath string // Absolute path to the console scrollback history
history []string // Scroll history maintained by the console
printer io.Writer // Output writer to serialize any display strings to
}
func New(config Config) (*Console, error) {
@ -106,7 +103,6 @@ func New(config Config) (*Console, error) {
func (c *Console) init(preload []string) error {
// Initialize the JavaScript <-> Go RPC bridge
bridge := newBridge(c.client, c.prompter, c.printer)
c.setContext = bridge.setContext
c.jsre.Set("jeth", struct{}{})
jethObj, _ := c.jsre.Get("jeth")
@ -131,7 +127,7 @@ func (c *Console) init(preload []string) error {
return fmt.Errorf("web3 provider: %v", err)
}
// Load the supported APIs into the JavaScript runtime environment
apis, err := c.client.Client().SupportedModules()
apis, err := c.client.SupportedModules()
if err != nil {
return fmt.Errorf("api modules: %v", err)
}
@ -257,7 +253,7 @@ func (c *Console) Welcome() {
console.log(" datadir: " + admin.datadir);
`)
// List all the supported modules for the user to call
if apis, err := c.client.Client().SupportedModules(); err == nil {
if apis, err := c.client.SupportedModules(); err == nil {
modules := make([]string, 0, len(apis))
for api, version := range apis {
modules = append(modules, fmt.Sprintf("%s:%s", api, version))
@ -351,12 +347,7 @@ func (c *Console) Interactive() {
}
}
}
done := make(chan struct{})
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
c.setContext(ctx)
c.Evaluate(input)
c.setContext(nil)
close(done)
input = ""
}
}


@ -269,7 +269,7 @@ func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
if block == nil {
return fmt.Errorf("non existent block [%x…]", hash[:4])
}
if _, err := trie.NewSecure(block.Root(), self.chainDb); err != nil {
if _, err := trie.NewSecure(block.Root(), self.chainDb, 0); err != nil {
return err
}
// If all checks out, manually set the head block
@ -775,8 +775,12 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
// Report some public statistics so the user has a clue what's going on
first, last := blockChain[0], blockChain[len(blockChain)-1]
glog.V(logger.Info).Infof("imported %d receipt(s) (%d ignored) in %v. #%d [%x… / %x…]", stats.processed, stats.ignored,
time.Since(start), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
ignored := ""
if stats.ignored > 0 {
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
}
glog.V(logger.Info).Infof("imported %d receipts%s in %9v. #%d [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
return 0, nil
}
@ -840,19 +844,16 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// faster than direct delivery and requires much less mutex
// acquiring.
var (
stats struct{ queued, processed, ignored int }
stats = insertStats{startTime: time.Now()}
events = make([]interface{}, 0, len(chain))
coalescedLogs vm.Logs
tstart = time.Now()
nonceChecked = make([]bool, len(chain))
nonceChecked = make([]bool, len(chain))
)
// Start the parallel nonce verifier.
nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain)
defer close(nonceAbort)
txcount := 0
for i, block := range chain {
if atomic.LoadInt32(&self.procInterrupt) == 1 {
glog.V(logger.Debug).Infoln("Premature abort during block chain processing")
@ -947,7 +948,6 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
return i, err
}
txcount += len(block.Transactions())
// write the block to the chain and get the status
status, err := self.WriteBlock(block)
if err != nil {
@ -957,8 +957,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
switch status {
case CanonStatTy:
if glog.V(logger.Debug) {
glog.Infof("[%v] inserted block #%d (%d TXs %v G %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), block.GasUsed(), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
glog.Infof("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles()))
}
blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
// This puts transactions in an extra db for rpc
@ -975,26 +976,65 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
}
case SideStatTy:
if glog.V(logger.Detail) {
glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
glog.Infof("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles()))
}
blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainSideEvent{block, logs})
case SplitStatTy:
events = append(events, ChainSplitEvent{block, logs})
}
stats.processed++
if glog.V(logger.Info) {
stats.report(chain, i)
}
}
if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) {
tend := time.Since(tstart)
start, end := chain[0], chain[len(chain)-1]
glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
}
go self.postChainEvents(events, coalescedLogs)
return 0, nil
}
// insertStats tracks and reports on block insertion.
type insertStats struct {
queued, processed, ignored int
lastIndex int
startTime time.Time
}
// statsReportLimit is the time limit during import after which we always print
// out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int) {
var (
now = time.Now()
elapsed = now.Sub(st.startTime)
)
if index == len(chain)-1 || elapsed >= statsReportLimit {
start, end := chain[st.lastIndex], chain[index]
txcount := countTransactions(chain[st.lastIndex : index+1])
extra := ""
if st.queued > 0 || st.ignored > 0 {
extra = fmt.Sprintf(" (%d queued %d ignored)", st.queued, st.ignored)
}
glog.Infof("imported %d blocks%s, %5d txs in %9v. #%v [%x… / %x…]\n", st.processed, extra, txcount, common.PrettyDuration(elapsed), end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
*st = insertStats{startTime: now, lastIndex: index}
}
}
func countTransactions(chain []*types.Block) (c int) {
for _, b := range chain {
c += len(b.Transactions())
}
return c
}
// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
// to be part of the new canonical chain and accumulates potential missing transactions and post an
// event about them


@ -21,6 +21,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
)
var ChainConfigNotFoundErr = errors.New("ChainConfig not found") // general config not found error
@ -35,6 +36,8 @@ type ChainConfig struct {
DAOForkBlock *big.Int `json:"daoForkBlock"` // TheDAO hard-fork switch block (nil = no fork)
DAOForkSupport bool `json:"daoForkSupport"` // Whether the nodes supports or opposes the DAO hard-fork
HomesteadGasRepriceBlock *big.Int `json:"homesteadGasRepriceBlock"` // Homestead gas reprice switch block (nil = no fork)
VmConfig vm.Config `json:"-"`
}
@ -45,3 +48,14 @@ func (c *ChainConfig) IsHomestead(num *big.Int) bool {
}
return num.Cmp(c.HomesteadBlock) >= 0
}
// GasTable returns the gas table corresponding to the current phase (homestead or homestead reprice).
//
// The returned GasTable's fields shouldn't, under any circumstances, be changed.
func (c *ChainConfig) GasTable(num *big.Int) params.GasTable {
if c.HomesteadGasRepriceBlock == nil || num == nil || num.Cmp(c.HomesteadGasRepriceBlock) < 0 {
return params.GasTableHomestead
}
return params.GasTableHomesteadGasRepriceFork
}
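A sketch of how the new method selects tables (the reprice block number here is hypothetical, chosen only for illustration):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	cfg := &core.ChainConfig{HomesteadGasRepriceBlock: big.NewInt(2463000)}

	fmt.Println(cfg.GasTable(big.NewInt(2000000))) // params.GasTableHomestead (before the fork)
	fmt.Println(cfg.GasTable(big.NewInt(3000000))) // params.GasTableHomesteadGasRepriceFork
}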


@ -18,6 +18,7 @@ package core
import (
crand "crypto/rand"
"fmt"
"math"
"math/big"
mrand "math/rand"
@ -321,8 +322,12 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
}
// Report some public statistics so the user has a clue what's going on
first, last := chain[0], chain[len(chain)-1]
glog.V(logger.Info).Infof("imported %d header(s) (%d ignored) in %v. #%v [%x… / %x…]", stats.processed, stats.ignored,
time.Since(start), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
ignored := ""
if stats.ignored > 0 {
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
}
glog.V(logger.Info).Infof("imported %d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
return 0, nil
}


@ -137,9 +137,9 @@ func (self *StateObject) markSuicided() {
func (c *StateObject) getTrie(db trie.Database) *trie.SecureTrie {
if c.trie == nil {
var err error
c.trie, err = trie.NewSecure(c.data.Root, db)
c.trie, err = trie.NewSecure(c.data.Root, db, 0)
if err != nil {
c.trie, _ = trie.NewSecure(common.Hash{}, db)
c.trie, _ = trie.NewSecure(common.Hash{}, db, 0)
c.setError(fmt.Errorf("can't create storage trie: %v", err))
}
}


@ -41,7 +41,10 @@ var StartingNonce uint64
const (
// Number of past tries to keep. The arbitrarily chosen value here
// is max uncle depth + 1.
maxTrieCacheLength = 8
maxPastTries = 8
// Trie cache generation limit.
maxTrieCacheGen = 100
// Number of codehash->size associations to keep.
codeSizeCacheSize = 100000
@ -86,7 +89,7 @@ type StateDB struct {
// Create a new state from a given trie
func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
tr, err := trie.NewSecure(root, db)
tr, err := trie.NewSecure(root, db, maxTrieCacheGen)
if err != nil {
return nil, err
}
@ -155,14 +158,14 @@ func (self *StateDB) openTrie(root common.Hash) (*trie.SecureTrie, error) {
return &tr, nil
}
}
return trie.NewSecure(root, self.db)
return trie.NewSecure(root, self.db, maxTrieCacheGen)
}
func (self *StateDB) pushTrie(t *trie.SecureTrie) {
self.lock.Lock()
defer self.lock.Unlock()
if len(self.pastTries) >= maxTrieCacheLength {
if len(self.pastTries) >= maxPastTries {
copy(self.pastTries, self.pastTries[1:])
self.pastTries[len(self.pastTries)-1] = t
} else {


@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)
var (
@ -46,10 +47,12 @@ var (
)
var (
maxQueuedPerAccount = uint64(64) // Max limit of queued transactions per address
maxQueuedInTotal = uint64(65536) // Max limit of queued transactions from all accounts
maxQueuedLifetime = 3 * time.Hour // Max amount of time transactions from idle accounts are queued
evictionInterval = time.Minute // Time interval to check for evictable transactions
minPendingPerAccount = uint64(16) // Min number of guaranteed transaction slots per address
maxPendingTotal = uint64(4096) // Max limit of pending transactions from all accounts (soft)
maxQueuedPerAccount = uint64(64) // Max limit of queued transactions per address
maxQueuedInTotal = uint64(1024) // Max limit of queued transactions from all accounts
maxQueuedLifetime = 3 * time.Hour // Max amount of time transactions from idle accounts are queued
evictionInterval = time.Minute // Time interval to check for evictable transactions
)
type stateFn func() (*state.StateDB, error)
@ -481,7 +484,6 @@ func (pool *TxPool) promoteExecutables() {
}
// Iterate over all accounts and promote any executable transactions
queued := uint64(0)
for addr, list := range pool.queue {
// Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(state.GetNonce(addr)) {
@ -519,6 +521,59 @@ func (pool *TxPool) promoteExecutables() {
delete(pool.queue, addr)
}
}
// If the pending limit is overflown, start equalizing allowances
pending := uint64(0)
for _, list := range pool.pending {
pending += uint64(list.Len())
}
if pending > maxPendingTotal {
// Assemble a spam order to penalize large transactors first
spammers := prque.New()
for addr, list := range pool.pending {
// Only evict transactions from high rollers
if uint64(list.Len()) > minPendingPerAccount {
// Skip local accounts as pools should maintain backlogs for themselves
for _, tx := range list.txs.items {
if !pool.localTx.contains(tx.Hash()) {
spammers.Push(addr, float32(list.Len()))
}
break // Checking on transaction for locality is enough
}
}
}
// Gradually drop transactions from offenders
offenders := []common.Address{}
for pending > maxPendingTotal && !spammers.Empty() {
// Retrieve the next offender if not local address
offender, _ := spammers.Pop()
offenders = append(offenders, offender.(common.Address))
// Equalize balances until all the same or below threshold
if len(offenders) > 1 {
// Calculate the equalization threshold for all current offenders
threshold := pool.pending[offender.(common.Address)].Len()
// Iteratively reduce all offenders until below limit or threshold reached
for pending > maxPendingTotal && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
for i := 0; i < len(offenders)-1; i++ {
list := pool.pending[offenders[i]]
list.Cap(list.Len() - 1)
pending--
}
}
}
}
// If still above threshold, reduce to limit or min allowance
if pending > maxPendingTotal && len(offenders) > 0 {
for pending > maxPendingTotal && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > minPendingPerAccount {
for _, addr := range offenders {
list := pool.pending[addr]
list.Cap(list.Len() - 1)
pending--
}
}
}
}
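A standalone sketch of the priority queue driving the spam ordering above (gopkg.in/karalabe/cookiejar.v2/collections/prque, the import added at the top of this file): higher priorities pop first, so pushing each sender with its pending count drains the largest offenders first. The addresses and counts here are made up.

package main

import (
	"fmt"

	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

func main() {
	spammers := prque.New()
	spammers.Push("0xaaaa", 64) // 64 pending transactions
	spammers.Push("0xbbbb", 17)
	spammers.Push("0xcccc", 129)

	for !spammers.Empty() {
		addr, count := spammers.Pop() // pops 0xcccc, then 0xaaaa, then 0xbbbb
		fmt.Printf("penalize %v (%v pending)\n", addr, count)
	}
}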
// If we've queued more transactions than the hard limit, drop oldest ones
if queued > maxQueuedInTotal {
// Sort all accounts with queued transactions by heartbeat


@ -20,12 +20,16 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
)
// RuleSet is an interface that defines the current rule set during the
// execution of the EVM instructions (e.g. whether it's homestead)
type RuleSet interface {
IsHomestead(*big.Int) bool
// GasTable returns the gas prices for this phase, which is based on
// block number passed in.
GasTable(*big.Int) params.GasTable
}
// Environment is an EVM requirement and helper which allows access to outside


@ -35,8 +35,27 @@ var (
GasStop = big.NewInt(0)
GasContractByte = big.NewInt(200)
n64 = big.NewInt(64)
)
// callGas returns the actual gas cost of the call.
//
// The gas cost was changed during the Homestead gas reprice hard fork so that EIP 150
// could be implemented. The returned gas is gas - base * 63 / 64.
func callGas(gasTable params.GasTable, availableGas, base, callCost *big.Int) *big.Int {
if gasTable.CreateBySuicide != nil {
availableGas = new(big.Int).Sub(availableGas, base)
g := new(big.Int).Div(availableGas, n64)
g.Sub(availableGas, g)
if g.Cmp(callCost) < 0 {
return g
}
}
return callCost
}
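A worked example of the 63/64ths rule above, applicable once the gas table sets CreateBySuicide (i.e. post-fork): with availableGas = 10000, base = 700 and a requested callCost = 12000, availableGas - base = 9300 and 9300/64 = 145, so g = 9300 - 145 = 9155; since 9155 < 12000, the call is given only 9155 gas rather than the full requested amount.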
// baseCheck checks for any stack error underflows
func baseCheck(op OpCode, stack *Stack, gas *big.Int) error {
// PUSH and DUP are a bit special. They all cost the same but we do want to have checking on stack push limit
@ -127,18 +146,19 @@ var _baseCheck = map[OpCode]req{
MSIZE: {0, GasQuickStep, 1},
GAS: {0, GasQuickStep, 1},
BLOCKHASH: {1, GasExtStep, 1},
BALANCE: {1, GasExtStep, 1},
EXTCODESIZE: {1, GasExtStep, 1},
EXTCODECOPY: {4, GasExtStep, 0},
BALANCE: {1, Zero, 1},
EXTCODESIZE: {1, Zero, 1},
EXTCODECOPY: {4, Zero, 0},
SLOAD: {1, params.SloadGas, 1},
SSTORE: {2, Zero, 0},
SHA3: {2, params.Sha3Gas, 1},
CREATE: {3, params.CreateGas, 1},
CALL: {7, params.CallGas, 1},
CALLCODE: {7, params.CallGas, 1},
DELEGATECALL: {6, params.CallGas, 1},
JUMPDEST: {0, params.JumpdestGas, 0},
// Zero is calculated in the gasSwitch
CALL: {7, Zero, 1},
CALLCODE: {7, Zero, 1},
DELEGATECALL: {6, Zero, 1},
SUICIDE: {1, Zero, 0},
JUMPDEST: {0, params.JumpdestGas, 0},
RETURN: {2, Zero, 0},
PUSH1: {0, GasFastestStep, 1},
DUP1: {0, Zero, 1},


@ -514,7 +514,12 @@ func opCreate(instr instruction, pc *uint64, env Environment, contract *Contract
input = memory.Get(offset.Int64(), size.Int64())
gas = new(big.Int).Set(contract.Gas)
)
contract.UseGas(contract.Gas)
if env.RuleSet().GasTable(env.BlockNumber()).CreateBySuicide != nil {
gas.Div(gas, n64)
gas = gas.Sub(contract.Gas, gas)
}
contract.UseGas(gas)
_, addr, suberr := env.Create(contract, input, gas, contract.Price, value)
// Push item on the stack based on the returned error. If the ruleset is
// homestead we must check for CodeStoreOutOfGasError (homestead only


@ -25,12 +25,16 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)
// The default, always homestead, rule set for the vm env
type ruleSet struct{}
func (ruleSet) IsHomestead(*big.Int) bool { return true }
func (ruleSet) GasTable(*big.Int) params.GasTable {
return params.GasTableHomesteadGasRepriceFork
}
// Config is a basic type specifying certain configuration flags for running
// the EVM.


@ -44,6 +44,7 @@ type EVM struct {
env Environment
jumpTable vmJumpTable
cfg Config
gasTable params.GasTable
}
// New returns a new instance of the EVM.
@ -52,6 +53,7 @@ func New(env Environment, cfg Config) *EVM {
env: env,
jumpTable: newJumpTable(env.RuleSet(), env.BlockNumber()),
cfg: cfg,
gasTable: env.RuleSet().GasTable(env.BlockNumber()),
}
}
@ -169,7 +171,7 @@ func (evm *EVM) Run(contract *Contract, input []byte) (ret []byte, err error) {
// Get the memory location of pc
op = contract.GetOp(pc)
// calculate the new memory size and gas price for the current executing opcode
newMemSize, cost, err = calculateGasAndSize(evm.env, contract, caller, op, statedb, mem, stack)
newMemSize, cost, err = calculateGasAndSize(evm.gasTable, evm.env, contract, caller, op, statedb, mem, stack)
if err != nil {
return nil, err
}
@ -234,7 +236,7 @@ func (evm *EVM) Run(contract *Contract, input []byte) (ret []byte, err error) {
// calculateGasAndSize calculates the gas required for the given opcode and stack items,
// and computes the new memory size for the operation. It does not reduce gas or resize
// the memory.
func calculateGasAndSize(env Environment, contract *Contract, caller ContractRef, op OpCode, statedb Database, mem *Memory, stack *Stack) (*big.Int, *big.Int, error) {
func calculateGasAndSize(gasTable params.GasTable, env Environment, contract *Contract, caller ContractRef, op OpCode, statedb Database, mem *Memory, stack *Stack) (*big.Int, *big.Int, error) {
var (
gas = new(big.Int)
newMemSize *big.Int = new(big.Int)
@ -246,6 +248,24 @@ func calculateGasAndSize(env Environment, contract *Contract, caller ContractRef
// stack Check, memory resize & gas phase
switch op {
case SUICIDE:
// if CreateBySuicide is not nil, the homestead gas reprice fork rules apply
if gasTable.CreateBySuicide != nil {
gas.Set(gasTable.Suicide)
if !env.Db().Exist(common.BigToAddress(stack.data[len(stack.data)-1])) {
gas.Add(gas, gasTable.CreateBySuicide)
}
}
if !statedb.HasSuicided(contract.Address()) {
statedb.AddRefund(params.SuicideRefundGas)
}
case EXTCODESIZE:
gas.Set(gasTable.ExtcodeSize)
case BALANCE:
gas.Set(gasTable.Balance)
case SLOAD:
gas.Set(gasTable.SLoad)
case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16:
n := int(op - SWAP1 + 2)
err := stack.require(n)
@ -274,6 +294,8 @@ func calculateGasAndSize(env Environment, contract *Contract, caller ContractRef
gas.Add(gas, new(big.Int).Mul(mSize, params.LogDataGas))
newMemSize = calcMemSize(mStart, mSize)
quadMemGas(mem, newMemSize, gas)
case EXP:
gas.Add(gas, new(big.Int).Mul(big.NewInt(int64(len(stack.data[stack.len()-2].Bytes()))), params.ExpByteGas))
case SSTORE:
@ -302,67 +324,100 @@ func calculateGasAndSize(env Environment, contract *Contract, caller ContractRef
g = params.SstoreResetGas
}
gas.Set(g)
case SUICIDE:
if !statedb.HasSuicided(contract.Address()) {
statedb.AddRefund(params.SuicideRefundGas)
}
case MLOAD:
newMemSize = calcMemSize(stack.peek(), u256(32))
quadMemGas(mem, newMemSize, gas)
case MSTORE8:
newMemSize = calcMemSize(stack.peek(), u256(1))
quadMemGas(mem, newMemSize, gas)
case MSTORE:
newMemSize = calcMemSize(stack.peek(), u256(32))
quadMemGas(mem, newMemSize, gas)
case RETURN:
newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-2])
quadMemGas(mem, newMemSize, gas)
case SHA3:
newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-2])
words := toWordSize(stack.data[stack.len()-2])
gas.Add(gas, words.Mul(words, params.Sha3WordGas))
quadMemGas(mem, newMemSize, gas)
case CALLDATACOPY:
newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-3])
words := toWordSize(stack.data[stack.len()-3])
gas.Add(gas, words.Mul(words, params.CopyGas))
quadMemGas(mem, newMemSize, gas)
case CODECOPY:
newMemSize = calcMemSize(stack.peek(), stack.data[stack.len()-3])
words := toWordSize(stack.data[stack.len()-3])
gas.Add(gas, words.Mul(words, params.CopyGas))
quadMemGas(mem, newMemSize, gas)
case EXTCODECOPY:
gas.Set(gasTable.ExtcodeCopy)
newMemSize = calcMemSize(stack.data[stack.len()-2], stack.data[stack.len()-4])
words := toWordSize(stack.data[stack.len()-4])
gas.Add(gas, words.Mul(words, params.CopyGas))
quadMemGas(mem, newMemSize, gas)
case CREATE:
newMemSize = calcMemSize(stack.data[stack.len()-2], stack.data[stack.len()-3])
quadMemGas(mem, newMemSize, gas)
case CALL, CALLCODE:
gas.Add(gas, stack.data[stack.len()-1])
gas.Set(gasTable.Calls)
if op == CALL {
if !env.Db().Exist(common.BigToAddress(stack.data[stack.len()-2])) {
gas.Add(gas, params.CallNewAccountGas)
}
}
if len(stack.data[stack.len()-3].Bytes()) > 0 {
gas.Add(gas, params.CallValueTransferGas)
}
x := calcMemSize(stack.data[stack.len()-6], stack.data[stack.len()-7])
y := calcMemSize(stack.data[stack.len()-4], stack.data[stack.len()-5])
newMemSize = common.BigMax(x, y)
quadMemGas(mem, newMemSize, gas)
cg := callGas(gasTable, contract.Gas, gas, stack.data[stack.len()-1])
// Replace the stack item with the new gas calculation. This means that
// either the original item is left on the stack or the item is replaced by:
// (availableGas - gas) * 63 / 64
// We replace the stack item so that it's available when the opCall instruction is
// called. This information is otherwise lost due to the dependency on *current*
// available gas.
stack.data[stack.len()-1] = cg
gas.Add(gas, cg)
case DELEGATECALL:
gas.Add(gas, stack.data[stack.len()-1])
gas.Set(gasTable.Calls)
x := calcMemSize(stack.data[stack.len()-5], stack.data[stack.len()-6])
y := calcMemSize(stack.data[stack.len()-3], stack.data[stack.len()-4])
newMemSize = common.BigMax(x, y)
quadMemGas(mem, newMemSize, gas)
cg := callGas(gasTable, contract.Gas, gas, stack.data[stack.len()-1])
// Replace the stack item with the new gas calculation. This means that
// either the original item is left on the stack or the item is replaced by:
// (availableGas - gas) * 63 / 64
// We replace the stack item so that it's available when the opCall instruction is
// called.
stack.data[stack.len()-1] = cg
gas.Add(gas, cg)
}
quadMemGas(mem, newMemSize, gas)
return newMemSize, gas, nil
}


@ -73,6 +73,7 @@ type Config struct {
NoDefSrv bool // No default LES server
LightServ int // Maximum percentage of time allowed for serving LES requests
LightPeers int // Maximum number of LES client peers
MaxPeers int // Maximum number of global peers
SkipBcVersionCheck bool // e.g. blockchain export
DatabaseCache int
@ -121,7 +122,7 @@ type Ethereum struct {
txMu sync.Mutex
blockchain *core.BlockChain
protocolManager *ProtocolManager
ls LesServer
lesServer LesServer
// DB interfaces
chainDb ethdb.Database // Block chain database
@ -147,7 +148,7 @@ type Ethereum struct {
}
func (s *Ethereum) AddLesServer(ls LesServer) {
s.ls = ls
s.lesServer = ls
}
// New creates a new Ethereum object (including the
@ -232,7 +233,18 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
newPool := core.NewTxPool(eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
eth.txPool = newPool
if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.FastSync, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil {
maxPeers := config.MaxPeers
if config.LightServ > 0 {
// if we are running a light server, limit the number of ETH peers so that we reserve some space for incoming LES connections
// temporary solution until the new peer connectivity API is finished
halfPeers := maxPeers / 2
maxPeers -= config.LightPeers
if maxPeers < halfPeers {
maxPeers = halfPeers
}
}
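For example (numbers are illustrative), with config.MaxPeers = 25 and config.LightPeers = 20: halfPeers = 12 and 25 - 20 = 5, which falls below half, so the ETH protocol is capped at 12 peers and the remaining slots are reserved for incoming LES connections.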
if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.FastSync, config.NetworkId, maxPeers, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil {
return nil, err
}
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.pow)
@ -327,7 +339,7 @@ func (s *Ethereum) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
Service: filters.NewPublicFilterAPI(s.ApiBackend),
Service: filters.NewPublicFilterAPI(s.ApiBackend, true),
Public: true,
}, {
Namespace: "admin",
@ -395,10 +407,10 @@ func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManage
// Protocols implements node.Service, returning all the currently configured
// network protocols to start.
func (s *Ethereum) Protocols() []p2p.Protocol {
if s.ls == nil {
if s.lesServer == nil {
return s.protocolManager.SubProtocols
} else {
return append(s.protocolManager.SubProtocols, s.ls.Protocols()...)
return append(s.protocolManager.SubProtocols, s.lesServer.Protocols()...)
}
}
@ -410,8 +422,8 @@ func (s *Ethereum) Start(srvr *p2p.Server) error {
s.StartAutoDAG()
}
s.protocolManager.Start()
if s.ls != nil {
s.ls.Start()
if s.lesServer != nil {
s.lesServer.Start()
}
return nil
}
@ -424,8 +436,8 @@ func (s *Ethereum) Stop() error {
}
s.blockchain.Stop()
s.protocolManager.Stop()
if s.ls != nil {
s.ls.Stop()
if s.lesServer != nil {
s.lesServer.Stop()
}
s.txPool.Stop()
s.miner.Stop()


@ -50,6 +50,8 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
return nil // empty database, nothing to do
}
glog.V(logger.Info).Infof("Upgrading chain database to use sequential keys")
stopChn := make(chan struct{})
stoppedChn := make(chan struct{})


@ -952,7 +952,7 @@ func (d *Downloader) fetchNodeData() error {
// Log a message to the user and return
if delivered > 0 {
glog.V(logger.Info).Infof("imported %d state entries in %v: processed %d, pending at least %d", delivered, time.Since(start), d.syncStatsStateDone, pending)
glog.V(logger.Info).Infof("imported %d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), d.syncStatsStateDone, pending)
}
})
}


@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/rpc"
)
@ -53,7 +52,8 @@ type filter struct {
// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such as blocks, transactions and logs.
type PublicFilterAPI struct {
apiBackend ethapi.Backend
backend Backend
useMipMap bool
mux *event.TypeMux
quit chan struct{}
chainDb ethdb.Database
@ -63,13 +63,14 @@ type PublicFilterAPI struct {
}
// NewPublicFilterAPI returns a new PublicFilterAPI instance.
func NewPublicFilterAPI(apiBackend ethapi.Backend) *PublicFilterAPI {
func NewPublicFilterAPI(backend Backend, useMipMap bool) *PublicFilterAPI {
api := &PublicFilterAPI{
apiBackend: apiBackend,
mux: apiBackend.EventMux(),
chainDb: apiBackend.ChainDb(),
events: NewEventSystem(apiBackend.EventMux()),
filters: make(map[rpc.ID]*filter),
backend: backend,
useMipMap: useMipMap,
mux: backend.EventMux(),
chainDb: backend.ChainDb(),
events: NewEventSystem(backend.EventMux()),
filters: make(map[rpc.ID]*filter),
}
go api.timeoutLoop()
@ -325,7 +326,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([
crit.ToBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
}
filter := New(api.apiBackend)
filter := New(api.backend, api.useMipMap)
filter.SetBeginBlock(crit.FromBlock.Int64())
filter.SetEndBlock(crit.ToBlock.Int64())
filter.SetAddresses(crit.Addresses)
@ -365,7 +366,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]Log
return []Log{}, nil
}
filter := New(api.apiBackend)
filter := New(api.backend, api.useMipMap)
filter.SetBeginBlock(f.crit.FromBlock.Int64())
filter.SetEndBlock(f.crit.ToBlock.Int64())
filter.SetAddresses(f.crit.Addresses)
@ -541,4 +542,3 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
return nil
}


@ -17,21 +17,29 @@
package filters
import (
// "math"
"math"
"time"
"github.com/ethereum/go-ethereum/common"
// "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
)
type Backend interface {
ChainDb() ethdb.Database
EventMux() *event.TypeMux
HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
}
// Filter can be used to retrieve and filter logs
type Filter struct {
apiBackend ethapi.Backend
backend Backend
useMipMap bool
created time.Time
@ -43,10 +51,11 @@ type Filter struct {
// New creates a new filter which uses a bloom filter on blocks to figure out whether
// a particular block is interesting or not.
func New(apiBackend ethapi.Backend) *Filter {
func New(backend Backend, useMipMap bool) *Filter {
return &Filter{
apiBackend: apiBackend,
db: apiBackend.ChainDb(),
backend: backend,
useMipMap: useMipMap,
db: backend.ChainDb(),
}
}
@ -75,7 +84,10 @@ func (f *Filter) SetTopics(topics [][]common.Hash) {
// Run filters logs with the current parameters set
func (f *Filter) Find(ctx context.Context) ([]Log, error) {
head, _ := f.apiBackend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
head, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
if head == nil {
return nil, nil
}
headBlockNumber := head.Number.Uint64()
var beginBlockNo uint64 = uint64(f.begin)
@ -90,14 +102,13 @@ func (f *Filter) Find(ctx context.Context) ([]Log, error) {
// if no addresses are present we can't make use of fast search which
// uses the mipmap bloom filters to check for fast inclusion and uses
// higher range probability in order to ensure at least a false positive
// if len(self.addresses) == 0 {
return f.getLogs(ctx, beginBlockNo, endBlockNo)
// }
// return self.mipFind(beginBlockNo, endBlockNo, 0)
if !f.useMipMap || len(f.addresses) == 0 {
return f.getLogs(ctx, beginBlockNo, endBlockNo)
}
return f.mipFind(beginBlockNo, endBlockNo, 0), nil
}
/*func (self *Filter) mipFind(start, end uint64, depth int) (logs vm.Logs) {
>>>>>>> les: light client protocol and API
func (f *Filter) mipFind(start, end uint64, depth int) (logs []Log) {
level := core.MIPMapLevels[depth]
// normalise numerator so we can work in level specific batches and
// work with the proper range checks
@ -112,7 +123,8 @@ func (f *Filter) Find(ctx context.Context) ([]Log, error) {
start := uint64(math.Max(float64(num), float64(start)))
end := uint64(math.Min(float64(num+level-1), float64(end)))
if depth+1 == len(core.MIPMapLevels) {
logs = append(logs, f.getLogs(start, end)...)
l, _ := f.getLogs(context.Background(), start, end)
logs = append(logs, l...)
} else {
logs = append(logs, f.mipFind(start, end, depth+1)...)
}
@ -125,11 +137,11 @@ func (f *Filter) Find(ctx context.Context) ([]Log, error) {
}
return logs
}*/
}
func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []Log, err error) {
for i := start; i <= end; i++ {
header, err := f.apiBackend.HeaderByNumber(ctx, rpc.BlockNumber(i))
header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
if header == nil || err != nil {
return logs, err
}
@ -138,7 +150,7 @@ func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []Log, er
// current parameters
if f.bloomFilter(header.Bloom) {
// Get the logs of the block
receipts, err := f.apiBackend.GetReceipts(ctx, header.Hash())
receipts, err := f.backend.GetReceipts(ctx, header.Hash())
if err != nil {
return nil, err
}


@ -68,6 +68,7 @@ type ProtocolManager struct {
blockchain *core.BlockChain
chaindb ethdb.Database
chainconfig *core.ChainConfig
maxPeers int
downloader *downloader.Downloader
fetcher *fetcher.Fetcher
@ -94,7 +95,7 @@ type ProtocolManager struct {
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the ethereum network.
func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, maxPeers int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkId: networkId,
@ -103,6 +104,7 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
blockchain: blockchain,
chaindb: chaindb,
chainconfig: config,
maxPeers: maxPeers,
peers: newPeerSet(),
newPeerCh: make(chan *peer),
noMorePeers: make(chan struct{}),
@ -253,10 +255,10 @@ func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *p
// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
if pm.peers.Len() >= 20 {
if pm.peers.Len() >= pm.maxPeers {
return p2p.DiscTooManyPeers
}
glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
// Execute the Ethereum handshake
@ -292,7 +294,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
}
// Start a timer to disconnect if the peer doesn't reply in time
p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
glog.V(logger.Warn).Infof("%v: timed out DAO fork-check, dropping", p)
glog.V(logger.Debug).Infof("%v: timed out DAO fork-check, dropping", p)
pm.removePeer(p.id)
})
// Make sure it's cleaned up if the peer dies off

View File

@ -52,6 +52,11 @@ var (
Usage: "pprof HTTP server listening port",
Value: 6060,
}
pprofAddrFlag = cli.StringFlag{
Name: "pprofaddr",
Usage: "pprof HTTP server listening interface",
Value: "127.0.0.1",
}
memprofilerateFlag = cli.IntFlag{
Name: "memprofilerate",
Usage: "Turn on memory profiling with the given rate",
@ -74,7 +79,7 @@ var (
// Flags holds all command-line flags required for debugging.
var Flags = []cli.Flag{
verbosityFlag, vmoduleFlag, backtraceAtFlag,
pprofFlag, pprofPortFlag,
pprofFlag, pprofAddrFlag, pprofPortFlag,
memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag,
}
@ -101,7 +106,7 @@ func Setup(ctx *cli.Context) error {
// pprof server
if ctx.GlobalBool(pprofFlag.Name) {
address := fmt.Sprintf("127.0.0.1:%d", ctx.GlobalInt(pprofPortFlag.Name))
address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name))
go func() {
glog.V(logger.Info).Infof("starting pprof server at http://%s/debug/pprof", address)
glog.Errorln(http.ListenAndServe(address, nil))
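With pprofAddrFlag the listen interface is no longer hard-wired to localhost. A standalone equivalent of the block above (the address is an example; binding to 0.0.0.0 exposes the profiler to the network, so use with care):

package main

import (
    "log"
    "net/http"
    _ "net/http/pprof" // registers the /debug/pprof handlers on the default mux
)

func main() {
    address := "0.0.0.0:6060" // interface and port are now both configurable
    log.Printf("starting pprof server at http://%s/debug/pprof", address)
    log.Println(http.ListenAndServe(address, nil))
}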

View File

@ -614,7 +614,7 @@ func (s *PublicBlockChainAPI) rpcOutputBlock(b *types.Block, inclTx bool, fullTx
"gasUsed": rpc.NewHexNumber(head.GasUsed),
"timestamp": rpc.NewHexNumber(head.Time),
"transactionsRoot": head.TxHash,
"receiptsRoot": head.ReceiptHash,
"receiptsRoot": head.ReceiptHash,
}
if inclTx {
@ -716,6 +716,16 @@ func newRPCTransactionFromBlockIndex(b *types.Block, txIndex int) (*RPCTransacti
return nil, nil
}
// newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index.
func newRPCRawTransactionFromBlockIndex(b *types.Block, txIndex int) (rpc.HexBytes, error) {
if txIndex >= 0 && txIndex < len(b.Transactions()) {
tx := b.Transactions()[txIndex]
return rlp.EncodeToBytes(tx)
}
return nil, nil
}
// newRPCTransaction returns a transaction that will serialize to the RPC representation.
func newRPCTransaction(b *types.Block, txHash common.Hash) (*RPCTransaction, error) {
for idx, tx := range b.Transactions() {
@ -801,6 +811,22 @@ func (s *PublicTransactionPoolAPI) GetTransactionByBlockHashAndIndex(ctx context
return nil, nil
}
// GetRawTransactionByBlockNumberAndIndex returns the bytes of the transaction for the given block number and index.
func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index rpc.HexNumber) (rpc.HexBytes, error) {
if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil {
return newRPCRawTransactionFromBlockIndex(block, index.Int())
}
return nil, nil
}
// GetRawTransactionByBlockHashAndIndex returns the bytes of the transaction for the given block hash and index.
func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index rpc.HexNumber) (rpc.HexBytes, error) {
if block, _ := s.b.GetBlock(ctx, blockHash); block != nil {
return newRPCRawTransactionFromBlockIndex(block, index.Int())
}
return nil, nil
}
// GetTransactionCount returns the number of transactions the given address has sent for the given block number
func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*rpc.HexNumber, error) {
state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
@ -866,9 +892,23 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txH
return nil, nil
}
// GetRawTransactionByHash returns the bytes of the transaction for the given hash.
func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context, txHash common.Hash) (rpc.HexBytes, error) {
var tx *types.Transaction
var err error
if tx, _, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
glog.V(logger.Debug).Infof("%v\n", err)
return nil, nil
} else if tx == nil {
return nil, nil
}
return rlp.EncodeToBytes(tx)
}
// GetTransactionReceipt returns the transaction receipt for the given transaction hash.
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) {
//fmt.Println("API GetTransactionReceipt", txHash)
receipt := core.GetReceipt(s.b.ChainDb(), txHash)
if receipt == nil {
glog.V(logger.Debug).Infof("receipt not found for transaction %s", txHash.Hex())
@ -876,14 +916,12 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (ma
}
tx, _, err := getTransaction(s.b.ChainDb(), s.b, txHash)
//fmt.Println("getTransaction", err)
if err != nil {
glog.V(logger.Debug).Infof("%v\n", err)
return nil, nil
}
txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), txHash)
//fmt.Println("getTransactionBlockData", txBlock, blockIndex, index, err)
if err != nil {
glog.V(logger.Debug).Infof("%v\n", err)
return nil, nil

View File

@ -468,6 +468,19 @@ web3._extend({
call: 'eth_submitTransaction',
params: 1,
inputFormatter: [web3._extend.formatters.inputTransactionFormatter]
}),
new web3._extend.Method({
name: 'getRawTransaction',
call: 'eth_getRawTransactionByHash',
params: 1
}),
new web3._extend.Method({
name: 'getRawTransactionFromBlock',
call: function(args) {
return (web3._extend.utils.isString(args[0]) && args[0].indexOf('0x') === 0) ? 'eth_getRawTransactionByBlockHashAndIndex' : 'eth_getRawTransactionByBlockNumberAndIndex';
},
params: 2,
inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter, web3._extend.utils.toHex]
})
],
properties:

View File

@ -144,7 +144,7 @@ func (s *LightEthereum) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
Service: filters.NewPublicFilterAPI(s.ApiBackend),
Service: filters.NewPublicFilterAPI(s.ApiBackend, false),
Public: true,
}, {
Namespace: "net",

View File

@ -448,7 +448,6 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
}
p.fcServerParams = params
p.fcServer = flowcontrol.NewServerNode(params)
p.fcServerParams = params
p.fcCosts = MRC.decode()
}

View File

@ -106,8 +106,8 @@ func NewLightChain(odr OdrBackend, config *core.ChainConfig, pow pow.PoW, mux *e
// add trusted CHT
if config.DAOForkSupport {
WriteTrustedCht(bc.chainDb, TrustedCht{
Number: 564,
Root: common.HexToHash("ee31f7fc21f627dc2b8d3ed8fed5b74dbc393d146a67249a656e163148e39016"),
Number: 601,
Root: common.HexToHash("7d417d315bb8873c03fad7447bfbb0e92a22afe2937eaf064d3d48077ec1dd87"),
})
} else {
WriteTrustedCht(bc.chainDb, TrustedCht{
@ -120,8 +120,8 @@ func NewLightChain(odr OdrBackend, config *core.ChainConfig, pow pow.PoW, mux *e
if bc.genesisBlock.Hash() == (common.Hash{12, 215, 134, 162, 66, 93, 22, 241, 82, 198, 88, 49, 108, 66, 62, 108, 225, 24, 30, 21, 195, 41, 88, 38, 215, 201, 144, 76, 186, 156, 227, 3}) {
// add trusted CHT for testnet
WriteTrustedCht(bc.chainDb, TrustedCht{
Number: 319,
Root: common.HexToHash("43b679ff9b4918b0b19e6256f20e35877365ec3e20b38e3b2a02cef5606176dc"),
Number: 436,
Root: common.HexToHash("97a12df5d04d72bde4b4b840e1018e4f08aee34b7d0bf2c5dbfc052b86fe7439"),
})
glog.V(logger.Info).Infoln("Added trusted CHT for testnet")
} else {

View File

@ -121,9 +121,9 @@ func (self *LightState) GetState(ctx context.Context, a common.Address, b common
return common.Hash{}, err
}
// IsDeleted returns true if the given account has been marked for deletion
// HasSuicided returns true if the given account has been marked for deletion
// or false if the account does not exist
func (self *LightState) IsDeleted(ctx context.Context, addr common.Address) (bool, error) {
func (self *LightState) HasSuicided(ctx context.Context, addr common.Address) (bool, error) {
stateObject, err := self.GetStateObject(ctx, addr)
if err == nil && stateObject != nil {
return stateObject.remove, nil
@ -172,7 +172,7 @@ func (self *LightState) SetState(ctx context.Context, addr common.Address, key c
}
// Delete marks an account to be removed and clears its balance
func (self *LightState) Delete(ctx context.Context, addr common.Address) (bool, error) {
func (self *LightState) Suicide(ctx context.Context, addr common.Address) (bool, error) {
stateObject, err := self.GetOrNewStateObject(ctx, addr)
if err == nil && stateObject != nil {
stateObject.MarkForDeletion()

View File

@ -78,7 +78,7 @@ func (t *LightTrie) do(ctx context.Context, fallbackKey []byte, fn func() error)
func (t *LightTrie) Get(ctx context.Context, key []byte) (res []byte, err error) {
err = t.do(ctx, key, func() (err error) {
if t.trie == nil {
t.trie, err = trie.NewSecure(t.id.Root, t.db)
t.trie, err = trie.NewSecure(t.id.Root, t.db, 0)
}
if err == nil {
res, err = t.trie.TryGet(key)
@ -97,7 +97,7 @@ func (t *LightTrie) Get(ctx context.Context, key []byte) (res []byte, err error)
func (t *LightTrie) Update(ctx context.Context, key, value []byte) (err error) {
err = t.do(ctx, key, func() (err error) {
if t.trie == nil {
t.trie, err = trie.NewSecure(t.id.Root, t.db)
t.trie, err = trie.NewSecure(t.id.Root, t.db, 0)
}
if err == nil {
err = t.trie.TryUpdate(key, value)
@ -111,7 +111,7 @@ func (t *LightTrie) Update(ctx context.Context, key, value []byte) (err error) {
func (t *LightTrie) Delete(ctx context.Context, key []byte) (err error) {
err = t.do(ctx, key, func() (err error) {
if t.trie == nil {
t.trie, err = trie.NewSecure(t.id.Root, t.db)
t.trie, err = trie.NewSecure(t.id.Root, t.db, 0)
}
if err == nil {
err = t.trie.TryDelete(key)
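All three call sites now pass a third argument to trie.NewSecure, the upstream cache-generation limit; 0 preserves the old uncached behaviour. A minimal sketch of the new signature (the wrapper and package name are hypothetical):

package lightsketch

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/trie"
)

// openSecureTrie opens a secure trie at root; the final argument caps how
// many past trie generations are cached (0 disables caching, as above).
func openSecureTrie(root common.Hash, db ethdb.Database) (*trie.SecureTrie, error) {
    return trie.NewSecure(root, db, 0)
}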

View File

@ -204,12 +204,14 @@ func (s *VMState) GetCode(addr common.Address) []byte {
return res
}
// GetCodeHash returns the contract code hash at the given address
func (s *VMState) GetCodeHash(addr common.Address) common.Hash {
res, err := s.state.GetCode(s.ctx, addr)
s.errHandler(err)
return crypto.Keccak256Hash(res)
}
// GetCodeSize returns the contract code size at the given address
func (s *VMState) GetCodeSize(addr common.Address) int {
res, err := s.state.GetCode(s.ctx, addr)
s.errHandler(err)
@ -246,9 +248,9 @@ func (s *VMState) SetState(addr common.Address, key common.Hash, value common.Ha
s.errHandler(err)
}
// Delete marks an account to be removed and clears its balance
// Suicide marks an account to be removed and clears its balance
func (s *VMState) Suicide(addr common.Address) bool {
res, err := s.state.Delete(s.ctx, addr)
res, err := s.state.Suicide(s.ctx, addr)
s.errHandler(err)
return res
}
@ -260,10 +262,10 @@ func (s *VMState) Exist(addr common.Address) bool {
return res
}
// IsDeleted returns true if the given account has been marked for deletion
// HasSuicided returns true if the given account has been marked for deletion
// or false if the account does not exist
func (s *VMState) HasSuicided(addr common.Address) bool {
res, err := s.state.IsDeleted(s.ctx, addr)
res, err := s.state.HasSuicided(s.ctx, addr)
s.errHandler(err)
return res
}

View File

@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/rcrowley/go-metrics"
"github.com/rcrowley/go-metrics/exp"
)
// MetricsEnabledFlag is the CLI flag name to use to enable metrics collections.
@ -44,6 +45,7 @@ func init() {
Enabled = true
}
}
exp.Exp(metrics.DefaultRegistry)
}
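exp.Exp publishes the registry through an expvar-style handler on http.DefaultServeMux, so any process that already serves net/http (for example the pprof server above) exposes it as well. A minimal sketch (the address is an example):

package main

import (
    "net/http"

    "github.com/rcrowley/go-metrics"
    "github.com/rcrowley/go-metrics/exp"
)

func main() {
    exp.Exp(metrics.DefaultRegistry) // mounts /debug/metrics on the default mux
    http.ListenAndServe("127.0.0.1:6060", nil)
}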
// NewMeter creates a new metrics Meter, either a real one or a NOP stub, depending

View File

@ -95,12 +95,16 @@ type Config struct {
// or not. Disabling is usually useful for protocol debugging (manual topology).
NoDiscovery bool
DiscoveryV5 bool
// Bootstrap nodes used to establish connectivity with the rest of the network.
BootstrapNodes []*discover.Node
// Network interface address on which the node should listen for inbound peers.
ListenAddr string
ListenAddrV5 string
// If set to a non-nil value, the given NAT port mapper is used to make the
// listening port available to the Internet.
NAT nat.Interface
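Together with the Start changes below, these fields let a node run v4 and v5 discovery side by side. A hedged configuration sketch using only fields visible in this hunk (ports are examples):

package main

import "github.com/ethereum/go-ethereum/node"

func main() {
    cfg := node.Config{
        NoDiscovery:  false,
        DiscoveryV5:  true, // enable the experimental topic-discovery protocol
        ListenAddr:   ":30303",
        ListenAddrV5: ":30304", // v5 runs on its own UDP endpoint
    }
    stack, err := node.New(&cfg)
    if err != nil {
        panic(err)
    }
    _ = stack // register services and Start() as usual
}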

View File

@ -157,11 +157,13 @@ func (n *Node) Start() error {
PrivateKey: n.config.NodeKey(),
Name: n.config.NodeName(),
Discovery: !n.config.NoDiscovery,
DiscoveryV5: n.config.DiscoveryV5,
BootstrapNodes: n.config.BootstrapNodes,
StaticNodes: n.config.StaticNodes(),
TrustedNodes: n.config.TrusterNodes(),
NodeDatabase: n.config.NodeDB(),
ListenAddr: n.config.ListenAddr,
ListenAddrV5: n.config.ListenAddrV5,
NAT: n.config.NAT,
Dialer: n.config.Dialer,
NoDial: n.config.NoDial,

View File

@ -0,0 +1,31 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import (
//"github.com/btcsuite/btcd/btcec"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
)
func S256() *secp256k1.BitCurve {
return secp256k1.S256()
}
// This version should be used for NaCl compilation
/*func S256() *btcec.KoblitzCurve {
return S256()
}*/

View File

@ -0,0 +1,413 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Contains the node database, storing previously seen nodes and any collected
// metadata about them for QoS purposes.
package discv5
import (
"bytes"
"crypto/rand"
"encoding/binary"
"os"
"sync"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
var (
nodeDBNilNodeID = NodeID{} // Special node ID to use as a nil element.
nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
nodeDBCleanupCycle = time.Hour // Time period for running the expiration task.
)
// nodeDB stores all nodes we know about.
type nodeDB struct {
lvl *leveldb.DB // Interface to the database itself
self NodeID // Own node id to prevent adding it into the database
runner sync.Once // Ensures we can start at most one expirer
quit chan struct{} // Channel to signal the expiring thread to stop
}
// Schema layout for the node database
var (
nodeDBVersionKey = []byte("version") // Version of the database to flush if changes
nodeDBItemPrefix = []byte("n:") // Identifier to prefix node entries with
nodeDBDiscoverRoot = ":discover"
nodeDBDiscoverPing = nodeDBDiscoverRoot + ":lastping"
nodeDBDiscoverPong = nodeDBDiscoverRoot + ":lastpong"
nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail"
nodeDBDiscoverLocalEndpoint = nodeDBDiscoverRoot + ":localendpoint"
nodeDBTopicRegTickets = ":tickets"
)
// newNodeDB creates a new node database for storing and retrieving information about
// known peers in the network. If no path is given, an in-memory, temporary
// database is constructed.
func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
if path == "" {
return newMemoryNodeDB(self)
}
return newPersistentNodeDB(path, version, self)
}
// newMemoryNodeDB creates a new in-memory node database without a persistent
// backend.
func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
return nil, err
}
return &nodeDB{
lvl: db,
self: self,
quit: make(chan struct{}),
}, nil
}
// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
opts := &opt.Options{OpenFilesCacheCapacity: 5}
db, err := leveldb.OpenFile(path, opts)
if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
db, err = leveldb.RecoverFile(path, nil)
}
if err != nil {
return nil, err
}
// The nodes contained in the cache correspond to a certain protocol version.
// Flush all nodes if the version doesn't match.
currentVer := make([]byte, binary.MaxVarintLen64)
currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]
blob, err := db.Get(nodeDBVersionKey, nil)
switch err {
case leveldb.ErrNotFound:
// Version not found (i.e. empty cache), insert it
if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
db.Close()
return nil, err
}
case nil:
// Version present, flush if different
if !bytes.Equal(blob, currentVer) {
db.Close()
if err = os.RemoveAll(path); err != nil {
return nil, err
}
return newPersistentNodeDB(path, version, self)
}
}
return &nodeDB{
lvl: db,
self: self,
quit: make(chan struct{}),
}, nil
}
// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id NodeID, field string) []byte {
if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
return []byte(field)
}
return append(nodeDBItemPrefix, append(id[:], field...)...)
}
// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id NodeID, field string) {
// If the key is not of a node, return it plainly
if !bytes.HasPrefix(key, nodeDBItemPrefix) {
return NodeID{}, string(key)
}
// Otherwise split the id and field
item := key[len(nodeDBItemPrefix):]
copy(id[:], item[:len(id)])
field = string(item[len(id):])
return id, field
}
// fetchInt64 retrieves an integer instance associated with a particular
// database key.
func (db *nodeDB) fetchInt64(key []byte) int64 {
blob, err := db.lvl.Get(key, nil)
if err != nil {
return 0
}
val, read := binary.Varint(blob)
if read <= 0 {
return 0
}
return val
}
// storeInt64 stores the given int64 value under a specific database key.
func (db *nodeDB) storeInt64(key []byte, n int64) error {
blob := make([]byte, binary.MaxVarintLen64)
blob = blob[:binary.PutVarint(blob, n)]
return db.lvl.Put(key, blob, nil)
}
func (db *nodeDB) storeRLP(key []byte, val interface{}) error {
blob, err := rlp.EncodeToBytes(val)
if err != nil {
return err
}
return db.lvl.Put(key, blob, nil)
}
func (db *nodeDB) fetchRLP(key []byte, val interface{}) error {
blob, err := db.lvl.Get(key, nil)
if err != nil {
return err
}
err = rlp.DecodeBytes(blob, val)
if err != nil {
glog.V(logger.Warn).Infof("key %x (%T) %v", key, val, err)
}
return err
}
// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
var node Node
if err := db.fetchRLP(makeKey(id, nodeDBDiscoverRoot), &node); err != nil {
return nil
}
node.sha = crypto.Keccak256Hash(node.ID[:])
return &node
}
// updateNode inserts - potentially overwriting - a node into the peer database.
func (db *nodeDB) updateNode(node *Node) error {
return db.storeRLP(makeKey(node.ID, nodeDBDiscoverRoot), node)
}
// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) error {
deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
for deleter.Next() {
if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
return err
}
}
return nil
}
// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data evacuation only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *nodeDB) ensureExpirer() {
db.runner.Do(func() { go db.expirer() })
}
// expirer should be started in a goroutine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *nodeDB) expirer() {
tick := time.Tick(nodeDBCleanupCycle)
for {
select {
case <-tick:
if err := db.expireNodes(); err != nil {
glog.V(logger.Error).Infof("Failed to expire nodedb items: %v", err)
}
case <-db.quit:
return
}
}
}
// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
threshold := time.Now().Add(-nodeDBNodeExpiration)
// Find discovered nodes that are older than the allowance
it := db.lvl.NewIterator(nil, nil)
defer it.Release()
for it.Next() {
// Skip the item if not a discovery node
id, field := splitKey(it.Key())
if field != nodeDBDiscoverRoot {
continue
}
// Skip the node if not expired yet (and not self)
if bytes.Compare(id[:], db.self[:]) != 0 {
if seen := db.lastPong(id); seen.After(threshold) {
continue
}
}
// Otherwise delete all associated information
db.deleteNode(id)
}
return nil
}
// lastPing retrieves the time of the last ping packet sent to a remote node,
// requesting binding.
func (db *nodeDB) lastPing(id NodeID) time.Time {
return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}
// updateLastPing updates the last time we tried contacting a remote node.
func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}
// lastPong retrieves the time of the last successful contact from remote node.
func (db *nodeDB) lastPong(id NodeID) time.Time {
return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}
// updateLastPong updates the last time a remote node successfully contacted us.
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}
// findFails retrieves the number of findnode failures since bonding.
func (db *nodeDB) findFails(id NodeID) int {
return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}
// updateFindFails updates the number of findnode failures since bonding.
func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}
// localEndpoint returns the last local endpoint communicated to the
// given remote node.
func (db *nodeDB) localEndpoint(id NodeID) *rpcEndpoint {
var ep rpcEndpoint
if err := db.fetchRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep); err != nil {
return nil
}
return &ep
}
func (db *nodeDB) updateLocalEndpoint(id NodeID, ep rpcEndpoint) error {
return db.storeRLP(makeKey(id, nodeDBDiscoverLocalEndpoint), &ep)
}
// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
var (
now = time.Now()
nodes = make([]*Node, 0, n)
it = db.lvl.NewIterator(nil, nil)
id NodeID
)
defer it.Release()
seek:
for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
// Seek to a random entry. The first byte is incremented by a
// random amount each time in order to increase the likelihood
// of hitting all existing nodes in very small databases.
ctr := id[0]
rand.Read(id[:])
id[0] = ctr + id[0]%16
it.Seek(makeKey(id, nodeDBDiscoverRoot))
n := nextNode(it)
if n == nil {
id[0] = 0
continue seek // iterator exhausted
}
if n.ID == db.self {
continue seek
}
if now.Sub(db.lastPong(n.ID)) > maxAge {
continue seek
}
for i := range nodes {
if nodes[i].ID == n.ID {
continue seek // duplicate
}
}
nodes = append(nodes, n)
}
return nodes
}
func (db *nodeDB) fetchTopicRegTickets(id NodeID) (issued, used uint32) {
key := makeKey(id, nodeDBTopicRegTickets)
blob, _ := db.lvl.Get(key, nil)
if len(blob) != 8 {
return 0, 0
}
issued = binary.BigEndian.Uint32(blob[0:4])
used = binary.BigEndian.Uint32(blob[4:8])
return
}
func (db *nodeDB) updateTopicRegTickets(id NodeID, issued, used uint32) error {
key := makeKey(id, nodeDBTopicRegTickets)
blob := make([]byte, 8)
binary.BigEndian.PutUint32(blob[0:4], issued)
binary.BigEndian.PutUint32(blob[4:8], used)
return db.lvl.Put(key, blob, nil)
}
// reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
for end := false; !end; end = !it.Next() {
id, field := splitKey(it.Key())
if field != nodeDBDiscoverRoot {
continue
}
var n Node
if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
if glog.V(logger.Warn) {
glog.Errorf("invalid node %x: %v", id, err)
}
continue
}
return &n
}
return nil
}
// close flushes and closes the database files.
func (db *nodeDB) close() {
close(db.quit)
db.lvl.Close()
}
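An in-package sketch of the lifecycle implied above: open an in-memory store, record activity for a peer, make sure the expirer runs, and query seeds (the version number is arbitrary here):

func exampleNodeDB(self, peer NodeID) {
    db, err := newNodeDB("", 1, self) // empty path => in-memory database
    if err != nil {
        return
    }
    defer db.close()

    db.updateLastPong(peer, time.Now()) // the peer answered, so it counts as seen
    db.ensureExpirer()                  // start stale-entry pruning (at most once)

    seeds := db.querySeeds(10, time.Hour) // up to 10 nodes seen within the hour
    _ = seeds
}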

vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go (generated, vendored, 1130 lines added): diff suppressed because it is too large.

View File

@ -0,0 +1,423 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"encoding/hex"
"errors"
"fmt"
"math/big"
"math/rand"
"net"
"net/url"
"regexp"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
// Node represents a host on the network.
// The public fields of Node may not be modified.
type Node struct {
IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP, TCP uint16 // port numbers
ID NodeID // the node's public key
// Network-related fields are contained in nodeNetGuts.
// These fields are not supposed to be used off the
// Network.loop goroutine.
nodeNetGuts
}
// NewNode creates a new node. It is mostly meant to be used for
// testing purposes.
func NewNode(id NodeID, ip net.IP, udpPort, tcpPort uint16) *Node {
if ipv4 := ip.To4(); ipv4 != nil {
ip = ipv4
}
return &Node{
IP: ip,
UDP: udpPort,
TCP: tcpPort,
ID: id,
nodeNetGuts: nodeNetGuts{sha: crypto.Keccak256Hash(id[:])},
}
}
func (n *Node) addr() *net.UDPAddr {
return &net.UDPAddr{IP: n.IP, Port: int(n.UDP)}
}
func (n *Node) setAddr(a *net.UDPAddr) {
n.IP = a.IP
if ipv4 := a.IP.To4(); ipv4 != nil {
n.IP = ipv4
}
n.UDP = uint16(a.Port)
}
// compares the given address against the stored values.
func (n *Node) addrEqual(a *net.UDPAddr) bool {
ip := a.IP
if ipv4 := a.IP.To4(); ipv4 != nil {
ip = ipv4
}
return n.UDP == uint16(a.Port) && bytes.Equal(n.IP, ip)
}
// Incomplete returns true for nodes with no IP address.
func (n *Node) Incomplete() bool {
return n.IP == nil
}
// checks whether n is a valid complete node.
func (n *Node) validateComplete() error {
if n.Incomplete() {
return errors.New("incomplete node")
}
if n.UDP == 0 {
return errors.New("missing UDP port")
}
if n.TCP == 0 {
return errors.New("missing TCP port")
}
if n.IP.IsMulticast() || n.IP.IsUnspecified() {
return errors.New("invalid IP (multicast/unspecified)")
}
_, err := n.ID.Pubkey() // validate the key (on curve, etc.)
return err
}
// The string representation of a Node is a URL.
// Please see ParseNode for a description of the format.
func (n *Node) String() string {
u := url.URL{Scheme: "enode"}
if n.Incomplete() {
u.Host = fmt.Sprintf("%x", n.ID[:])
} else {
addr := net.TCPAddr{IP: n.IP, Port: int(n.TCP)}
u.User = url.User(fmt.Sprintf("%x", n.ID[:]))
u.Host = addr.String()
if n.UDP != n.TCP {
u.RawQuery = "discport=" + strconv.Itoa(int(n.UDP))
}
}
return u.String()
}
var incompleteNodeURL = regexp.MustCompile("(?i)^(?:enode://)?([0-9a-f]+)$")
// ParseNode parses a node designator.
//
// There are two basic forms of node designators
// - incomplete nodes, which only have the public key (node ID)
// - complete nodes, which contain the public key and IP/Port information
//
// For incomplete nodes, the designator must look like one of these
//
// enode://<hex node id>
// <hex node id>
//
// For complete nodes, the node ID is encoded in the username portion
// of the URL, separated from the host by an @ sign. The hostname can
// only be given as an IP address, DNS domain names are not allowed.
// The port in the host name section is the TCP listening port. If the
// TCP and UDP (discovery) ports differ, the UDP port is specified as
// query parameter "discport".
//
// In the following example, the node URL describes
// a node with IP address 10.3.58.6, TCP listening port 30303
// and UDP discovery port 30301.
//
// enode://<hex node id>@10.3.58.6:30303?discport=30301
func ParseNode(rawurl string) (*Node, error) {
if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil {
id, err := HexID(m[1])
if err != nil {
return nil, fmt.Errorf("invalid node ID (%v)", err)
}
return NewNode(id, nil, 0, 0), nil
}
return parseComplete(rawurl)
}
func parseComplete(rawurl string) (*Node, error) {
var (
id NodeID
ip net.IP
tcpPort, udpPort uint64
)
u, err := url.Parse(rawurl)
if err != nil {
return nil, err
}
if u.Scheme != "enode" {
return nil, errors.New("invalid URL scheme, want \"enode\"")
}
// Parse the Node ID from the user portion.
if u.User == nil {
return nil, errors.New("does not contain node ID")
}
if id, err = HexID(u.User.String()); err != nil {
return nil, fmt.Errorf("invalid node ID (%v)", err)
}
// Parse the IP address.
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return nil, fmt.Errorf("invalid host: %v", err)
}
if ip = net.ParseIP(host); ip == nil {
return nil, errors.New("invalid IP address")
}
// Ensure the IP is 4 bytes long for IPv4 addresses.
if ipv4 := ip.To4(); ipv4 != nil {
ip = ipv4
}
// Parse the port numbers.
if tcpPort, err = strconv.ParseUint(port, 10, 16); err != nil {
return nil, errors.New("invalid port")
}
udpPort = tcpPort
qv := u.Query()
if qv.Get("discport") != "" {
udpPort, err = strconv.ParseUint(qv.Get("discport"), 10, 16)
if err != nil {
return nil, errors.New("invalid discport in query")
}
}
return NewNode(id, ip, uint16(udpPort), uint16(tcpPort)), nil
}
// MustParseNode parses a node URL. It panics if the URL is not valid.
func MustParseNode(rawurl string) *Node {
n, err := ParseNode(rawurl)
if err != nil {
panic("invalid node URL: " + err.Error())
}
return n
}
// type nodeQueue []*Node
//
// // pushNew adds n to the end if it is not present.
// func (nl *nodeList) appendNew(n *Node) {
// for _, entry := range n {
// if entry == n {
// return
// }
// }
// *nq = append(*nq, n)
// }
//
// // popRandom removes a random node. Nodes closer to
// // the beginning of the list have a slightly higher probability.
// func (nl *nodeList) popRandom() *Node {
// ix := rand.Intn(len(*nq))
// //TODO: probability as mentioned above.
// nl.removeIndex(ix)
// }
//
// func (nl *nodeList) removeIndex(i int) *Node {
// slice = *nl
// if len(*slice) <= i {
// return nil
// }
// *nl = append(slice[:i], slice[i+1:]...)
// }
const nodeIDBits = 512
// NodeID is a unique identifier for each node.
// The node identifier is a marshaled elliptic curve public key.
type NodeID [nodeIDBits / 8]byte
// NodeID prints as a long hexadecimal number.
func (n NodeID) String() string {
return fmt.Sprintf("%x", n[:])
}
// The Go syntax representation of a NodeID is a call to HexID.
func (n NodeID) GoString() string {
return fmt.Sprintf("discover.HexID(\"%x\")", n[:])
}
// HexID converts a hex string to a NodeID.
// The string may be prefixed with 0x.
func HexID(in string) (NodeID, error) {
if strings.HasPrefix(in, "0x") {
in = in[2:]
}
var id NodeID
b, err := hex.DecodeString(in)
if err != nil {
return id, err
} else if len(b) != len(id) {
return id, fmt.Errorf("wrong length, want %d hex chars", len(id)*2)
}
copy(id[:], b)
return id, nil
}
// MustHexID converts a hex string to a NodeID.
// It panics if the string is not a valid NodeID.
func MustHexID(in string) NodeID {
id, err := HexID(in)
if err != nil {
panic(err)
}
return id
}
// PubkeyID returns a marshaled representation of the given public key.
func PubkeyID(pub *ecdsa.PublicKey) NodeID {
var id NodeID
pbytes := elliptic.Marshal(pub.Curve, pub.X, pub.Y)
if len(pbytes)-1 != len(id) {
panic(fmt.Errorf("need %d bit pubkey, got %d bits", (len(id)+1)*8, len(pbytes)))
}
copy(id[:], pbytes[1:])
return id
}
// Pubkey returns the public key represented by the node ID.
// It returns an error if the ID is not a point on the curve.
func (id NodeID) Pubkey() (*ecdsa.PublicKey, error) {
p := &ecdsa.PublicKey{Curve: S256(), X: new(big.Int), Y: new(big.Int)}
half := len(id) / 2
p.X.SetBytes(id[:half])
p.Y.SetBytes(id[half:])
if !p.Curve.IsOnCurve(p.X, p.Y) {
return nil, errors.New("id is invalid secp256k1 curve point")
}
return p, nil
}
func (id NodeID) mustPubkey() ecdsa.PublicKey {
pk, err := id.Pubkey()
if err != nil {
panic(err)
}
return *pk
}
// recoverNodeID computes the public key used to sign the
// given hash from the signature.
func recoverNodeID(hash, sig []byte) (id NodeID, err error) {
pubkey, err := crypto.Ecrecover(hash, sig)
if err != nil {
return id, err
}
if len(pubkey)-1 != len(id) {
return id, fmt.Errorf("recovered pubkey has %d bits, want %d bits", len(pubkey)*8, (len(id)+1)*8)
}
for i := range id {
id[i] = pubkey[i+1]
}
return id, nil
}
// distcmp compares the distances a->target and b->target.
// Returns -1 if a is closer to target, 1 if b is closer to target
// and 0 if they are equal.
func distcmp(target, a, b common.Hash) int {
for i := range target {
da := a[i] ^ target[i]
db := b[i] ^ target[i]
if da > db {
return 1
} else if da < db {
return -1
}
}
return 0
}
// table of leading zero counts for bytes [0..255]
var lzcount = [256]int{
8, 7, 6, 6, 5, 5, 5, 5,
4, 4, 4, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
}
// logdist returns the logarithmic distance between a and b, log2(a ^ b).
func logdist(a, b common.Hash) int {
lz := 0
for i := range a {
x := a[i] ^ b[i]
if x == 0 {
lz += 8
} else {
lz += lzcount[x]
break
}
}
return len(a)*8 - lz
}
// hashAtDistance returns a random hash such that logdist(a, b) == n
func hashAtDistance(a common.Hash, n int) (b common.Hash) {
if n == 0 {
return a
}
// flip bit at position n, fill the rest with random bits
b = a
pos := len(a) - n/8 - 1
bit := byte(0x01) << (byte(n%8) - 1)
if bit == 0 {
pos++
bit = 0x80
}
b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
for i := pos + 1; i < len(a); i++ {
b[i] = byte(rand.Intn(255))
}
return b
}
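An in-package sketch of the distance helpers: hashAtDistance inverts logdist by construction, so the pair below always agrees:

func exampleDistance() {
    a := crypto.Keccak256Hash([]byte("some node"))
    b := hashAtDistance(a, 200) // a random hash exactly 200 "bits" away
    fmt.Println(logdist(a, b))  // always prints 200
}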

View File

@ -0,0 +1,27 @@
// Code generated by "stringer -type nodeEvent"; DO NOT EDIT
package discv5
import "fmt"
const (
_nodeEvent_name_0 = "invalidEventpingPacketpongPacketfindnodePacketneighborsPacketfindnodeHashPackettopicRegisterPackettopicQueryPackettopicNodesPacket"
_nodeEvent_name_1 = "pongTimeoutpingTimeoutneighboursTimeout"
)
var (
_nodeEvent_index_0 = [...]uint8{0, 12, 22, 32, 46, 61, 79, 98, 114, 130}
_nodeEvent_index_1 = [...]uint8{0, 11, 22, 39}
)
func (i nodeEvent) String() string {
switch {
case 0 <= i && i <= 8:
return _nodeEvent_name_0[_nodeEvent_index_0[i]:_nodeEvent_index_0[i+1]]
case 265 <= i && i <= 267:
i -= 265
return _nodeEvent_name_1[_nodeEvent_index_1[i]:_nodeEvent_index_1[i+1]]
default:
return fmt.Sprintf("nodeEvent(%d)", i)
}
}

View File

@ -0,0 +1,127 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Contains the NTP time drift detection via the SNTP protocol:
// https://tools.ietf.org/html/rfc4330
package discv5
import (
"fmt"
"net"
"sort"
"strings"
"time"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
)
const (
ntpPool = "pool.ntp.org" // ntpPool is the NTP server to query for the current time
ntpChecks = 3 // Number of measurements to do against the NTP server
)
// durationSlice attaches the methods of sort.Interface to []time.Duration,
// sorting in increasing order.
type durationSlice []time.Duration
func (s durationSlice) Len() int { return len(s) }
func (s durationSlice) Less(i, j int) bool { return s[i] < s[j] }
func (s durationSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// checkClockDrift queries an NTP server for clock drifts and warns the user if
// one large enough is detected.
func checkClockDrift() {
drift, err := sntpDrift(ntpChecks)
if err != nil {
return
}
if drift < -driftThreshold || drift > driftThreshold {
warning := fmt.Sprintf("System clock seems off by %v, which can prevent network connectivity", drift)
howtofix := fmt.Sprintf("Please enable network time synchronisation in system settings")
separator := strings.Repeat("-", len(warning))
glog.V(logger.Warn).Info(separator)
glog.V(logger.Warn).Info(warning)
glog.V(logger.Warn).Info(howtofix)
glog.V(logger.Warn).Info(separator)
} else {
glog.V(logger.Debug).Infof("Sanity NTP check reported %v drift, all ok", drift)
}
}
// sntpDrift does a naive time resolution against an NTP server and returns the
// measured drift. This method uses the simple version of NTP. It's not precise
// but should be fine for these purposes.
//
// Note, it executes two extra measurements compared to the number of requested
// ones to be able to discard the two extremes as outliers.
func sntpDrift(measurements int) (time.Duration, error) {
// Resolve the address of the NTP server
addr, err := net.ResolveUDPAddr("udp", ntpPool+":123")
if err != nil {
return 0, err
}
// Construct the time request (empty package with only 2 fields set):
// Bits 3-5: Protocol version, 3
// Bits 6-8: Mode of operation, client, 3
request := make([]byte, 48)
request[0] = 3<<3 | 3
// Execute each of the measurements
drifts := []time.Duration{}
for i := 0; i < measurements+2; i++ {
// Dial the NTP server and send the time retrieval request
conn, err := net.DialUDP("udp", nil, addr)
if err != nil {
return 0, err
}
defer conn.Close()
sent := time.Now()
if _, err = conn.Write(request); err != nil {
return 0, err
}
// Retrieve the reply and calculate the elapsed time
conn.SetDeadline(time.Now().Add(5 * time.Second))
reply := make([]byte, 48)
if _, err = conn.Read(reply); err != nil {
return 0, err
}
elapsed := time.Since(sent)
// Reconstruct the time from the reply data
sec := uint64(reply[43]) | uint64(reply[42])<<8 | uint64(reply[41])<<16 | uint64(reply[40])<<24
frac := uint64(reply[47]) | uint64(reply[46])<<8 | uint64(reply[45])<<16 | uint64(reply[44])<<24
nanosec := sec*1e9 + (frac*1e9)>>32
t := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Duration(nanosec)).Local()
// Calculate the drift based on an assumed answer time of RRT/2
drifts = append(drifts, sent.Sub(t)+elapsed/2)
}
// Calculate the average drift (drop the two extremes to avoid outliers)
sort.Sort(durationSlice(drifts))
drift := time.Duration(0)
for i := 1; i < len(drifts)-1; i++ {
drift += drifts[i]
}
return drift / time.Duration(measurements), nil
}
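An in-package sketch of consuming the probe directly, mirroring what checkClockDrift does above:

func logDriftOnce() {
    drift, err := sntpDrift(ntpChecks) // three measurements, extremes dropped
    if err != nil {
        return // NTP pool unreachable; nothing to report
    }
    glog.V(logger.Info).Infof("measured NTP clock drift: %v", drift)
}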

View File

@ -0,0 +1,258 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package discv5 implements the RLPx v5 Topic Discovery Protocol.
//
// The Topic Discovery protocol provides a way to find RLPx nodes that
// can be connected to. It uses a Kademlia-like protocol to maintain a
// distributed database of the IDs and endpoints of all listening
// nodes.
package discv5
import (
"crypto/rand"
"encoding/binary"
"net"
"sort"
"github.com/ethereum/go-ethereum/common"
)
const (
alpha = 3 // Kademlia concurrency factor
bucketSize = 16 // Kademlia bucket size
hashBits = len(common.Hash{}) * 8
nBuckets = hashBits + 1 // Number of buckets
maxBondingPingPongs = 16
maxFindnodeFailures = 5
)
type Table struct {
count int // number of nodes
buckets [nBuckets]*bucket // index of known nodes by distance
nodeAddedHook func(*Node) // for testing
self *Node // metadata of the local node
}
// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
entries []*Node
replacements []*Node
}
func newTable(ourID NodeID, ourAddr *net.UDPAddr) *Table {
self := NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port))
tab := &Table{self: self}
for i := range tab.buckets {
tab.buckets[i] = new(bucket)
}
return tab
}
// readRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (tab *Table) readRandomNodes(buf []*Node) (n int) {
// TODO: tree-based buckets would help here
// Find all non-empty buckets and get a fresh slice of their entries.
var buckets [][]*Node
for _, b := range tab.buckets {
if len(b.entries) > 0 {
buckets = append(buckets, b.entries[:])
}
}
if len(buckets) == 0 {
return 0
}
// Shuffle the buckets.
for i := uint32(len(buckets)) - 1; i > 0; i-- {
j := randUint(i)
buckets[i], buckets[j] = buckets[j], buckets[i]
}
// Move head of each bucket into buf, removing buckets that become empty.
var i, j int
for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
b := buckets[j]
buf[i] = &(*b[0])
buckets[j] = b[1:]
if len(b) == 1 {
buckets = append(buckets[:j], buckets[j+1:]...)
}
if len(buckets) == 0 {
break
}
}
return i + 1
}
func randUint(max uint32) uint32 {
if max == 0 {
return 0
}
var b [4]byte
rand.Read(b[:])
return binary.BigEndian.Uint32(b[:]) % max
}
// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
// This is a very wasteful way to find the closest nodes but
// obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently.
close := &nodesByDistance{target: target}
for _, b := range tab.buckets {
for _, n := range b.entries {
close.push(n, nresults)
}
}
return close
}
// add attempts to add the given node to its corresponding bucket. If the
// bucket has space available, adding the node succeeds immediately.
// Otherwise, the node is added to the replacement cache for the bucket.
func (tab *Table) add(n *Node) (contested *Node) {
b := tab.buckets[logdist(tab.self.sha, n.sha)]
switch {
case b.bump(n):
// n exists in b.
return nil
case len(b.entries) < bucketSize:
// b has space available.
b.addFront(n)
tab.count++
if tab.nodeAddedHook != nil {
tab.nodeAddedHook(n)
}
return nil
default:
// b has no space left, add to replacement cache
// and revalidate the last entry.
// TODO: drop previous node
b.replacements = append(b.replacements, n)
if len(b.replacements) > bucketSize {
copy(b.replacements, b.replacements[1:])
b.replacements = b.replacements[:len(b.replacements)-1]
}
return b.entries[len(b.entries)-1]
}
}
// stuff adds nodes to the end of their corresponding bucket in the table
// if the bucket is not full.
func (tab *Table) stuff(nodes []*Node) {
outer:
for _, n := range nodes {
if n.ID == tab.self.ID {
continue // don't add self
}
bucket := tab.buckets[logdist(tab.self.sha, n.sha)]
for i := range bucket.entries {
if bucket.entries[i].ID == n.ID {
continue outer // already in bucket
}
}
if len(bucket.entries) < bucketSize {
bucket.entries = append(bucket.entries, n)
tab.count++
if tab.nodeAddedHook != nil {
tab.nodeAddedHook(n)
}
}
}
}
// delete removes an entry from the node table (used to evacuate
// failed/non-bonded discovery peers).
func (tab *Table) delete(node *Node) {
bucket := tab.buckets[logdist(tab.self.sha, node.sha)]
for i := range bucket.entries {
if bucket.entries[i].ID == node.ID {
bucket.entries = append(bucket.entries[:i], bucket.entries[i+1:]...)
tab.count--
return
}
}
}
func (tab *Table) deleteReplace(node *Node) {
b := tab.buckets[logdist(tab.self.sha, node.sha)]
i := 0
for i < len(b.entries) {
if b.entries[i].ID == node.ID {
b.entries = append(b.entries[:i], b.entries[i+1:]...)
tab.count--
} else {
i++
}
}
// refill from replacement cache
// TODO: maybe use random index
if len(b.entries) < bucketSize && len(b.replacements) > 0 {
ri := len(b.replacements) - 1
b.addFront(b.replacements[ri])
tab.count++
b.replacements[ri] = nil
b.replacements = b.replacements[:ri]
}
}
func (b *bucket) addFront(n *Node) {
b.entries = append(b.entries, nil)
copy(b.entries[1:], b.entries)
b.entries[0] = n
}
func (b *bucket) bump(n *Node) bool {
for i := range b.entries {
if b.entries[i].ID == n.ID {
// move it to the front
copy(b.entries[1:], b.entries[:i])
b.entries[0] = n
return true
}
}
return false
}
// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
entries []*Node
target common.Hash
}
// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) {
ix := sort.Search(len(h.entries), func(i int) bool {
return distcmp(h.target, h.entries[i].sha, n.sha) > 0
})
if len(h.entries) < maxElems {
h.entries = append(h.entries, n)
}
if ix == len(h.entries) {
// farther away than all nodes we already have.
// if there was room for it, the node is now the last element.
} else {
// slide existing entries down to make room
// this will overwrite the entry we just appended.
copy(h.entries[ix+1:], h.entries[ix:])
h.entries[ix] = n
}
}
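An in-package sketch tying the pieces together: seed a table with known peers, then ask for the entries closest to a target hash:

func exampleClosest(selfID NodeID, selfAddr *net.UDPAddr, peers []*Node, target common.Hash) []*Node {
    tab := newTable(selfID, selfAddr)
    tab.stuff(peers) // fill buckets, skipping self and duplicates
    return tab.closest(target, bucketSize).entries
}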

View File

@ -0,0 +1,173 @@
package main
import (
"bufio"
"encoding/binary"
"fmt"
"image"
"image/png"
"os"
"sort"
"strconv"
"github.com/ethereum/go-ethereum/crypto"
)
var xs, ys, maxTime int
func set(pic *image.NRGBA, x, y, c, v int) {
if v > 255 {
v = 255
}
if x >= 0 && x < xs && y >= 0 && y < ys {
pic.Pix[y*pic.Stride+x*4+c] = uint8(v)
}
}
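// main reads a whitespace-separated simulation log (default "test.out", or
// os.Args[1]) and renders two PNG heat maps, test.png and test2.png. The
// record markers and their fields, as consumed below (the semantics are
// inferred from this code, so treat the labels as a best guess):
//   *N <hex prefix>               -- a node joins; the prefix of its ID
//   *R <time> <hex prefix> <rad>  -- a node reports an updated topic radius
//   *MR <time> <_> <rad>          -- the minimum radius estimate at <time>
//   *W <time> <hex prefix> <wp>   -- a node's advertised waiting period
//   *+ <time> <hex prefix> <_>    -- a successful topic registration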
func main() {
topicHash := crypto.Keccak256Hash([]byte("foo"))
fmt.Println(topicHash)
topicPrefix := binary.BigEndian.Uint64(topicHash[:8])
var nodes uint64Slice
inputFile := "test.out"
if len(os.Args) > 1 {
inputFile = os.Args[1]
}
f, _ := os.Open(inputFile)
scanner := bufio.NewScanner(f)
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
w := scanner.Text()
if w == "*N" {
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
nodes = append(nodes, prefix^topicPrefix)
}
if w == "*R" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
if int(time) > maxTime {
maxTime = int(time)
}
}
}
f.Close()
sort.Sort(nodes)
nodeIdx := make(map[uint64]int)
for i, v := range nodes {
nodeIdx[v^topicPrefix] = i
}
xs = maxTime / 10000
ys = len(nodes)
pic := image.NewNRGBA(image.Rect(0, 0, xs, ys))
for y := 0; y < ys; y++ {
for x := 0; x < xs; x++ {
set(pic, x, y, 3, 255)
}
}
pic2 := image.NewNRGBA(image.Rect(0, 0, xs, ys))
for y := 0; y < ys; y++ {
for x := 0; x < xs; x++ {
set(pic2, x, y, 3, 255)
}
}
f, _ = os.Open(inputFile)
scanner = bufio.NewScanner(f)
scanner.Split(bufio.ScanWords)
nodeRad := make(map[uint64]int)
for scanner.Scan() {
w := scanner.Text()
if w == "*R" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
scanner.Scan()
rad, _ := strconv.ParseInt(scanner.Text(), 10, 64)
if int(rad) != nodeRad[prefix] {
nodeRad[prefix] = int(rad)
radUint := uint64(rad) * ((^uint64(0)) / 1000000)
x := int(time * int64(xs) / int64(maxTime))
y := sort.Search(ys, func(i int) bool {
return nodes[i] > radUint
})
set(pic, x, y, 1, 255)
}
}
if w == "*MR" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
scanner.Scan()
rad, _ := strconv.ParseInt(scanner.Text(), 10, 64)
radUint := uint64(rad) * ((^uint64(0)) / 1000000)
x := int(time * int64(xs) / int64(maxTime))
y := sort.Search(ys, func(i int) bool {
return nodes[i] > radUint
})
set(pic, x, y, 0, 255)
}
if w == "*W" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
scanner.Scan()
wp, _ := strconv.ParseInt(scanner.Text(), 10, 64)
x := int(time * int64(xs) / int64(maxTime))
y := nodeIdx[prefix]
set(pic2, x, y, 0, int(wp/100000))
set(pic2, x, y, 1, int(wp/10000))
set(pic2, x, y, 2, int(wp/1000))
}
if w == "*+" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
x := int(time * int64(xs) / int64(maxTime))
y := nodeIdx[prefix]
set(pic, x, y, 2, 255)
scanner.Scan()
}
}
f.Close()
f, _ = os.Create("test.png")
w := bufio.NewWriter(f)
png.Encode(w, pic)
w.Flush()
f.Close()
f, _ = os.Create("test2.png")
w = bufio.NewWriter(f)
png.Encode(w, pic2)
w.Flush()
f.Close()
}
type uint64Slice []uint64
// Len is the number of elements in the collection.
func (s uint64Slice) Len() int {
return len(s)
}
// Less reports whether the element with
// index i should sort before the element with index j.
func (s uint64Slice) Less(i, j int) bool {
return s[i] < s[j]
}
// Swap swaps the elements with indexes i and j.
func (s uint64Slice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}

View File

@ -0,0 +1,178 @@
package main
import (
"bufio"
"encoding/binary"
"fmt"
"image"
"image/png"
"os"
"sort"
"strconv"
"github.com/ethereum/go-ethereum/crypto"
)
var xs, ys, maxTime int
func set(pic *image.NRGBA, x, y, c, v int) {
if v > 255 {
v = 255
}
if x >= 0 && x < xs && y >= 0 && y < ys {
pic.Pix[y*pic.Stride+x*4+c] = uint8(v)
}
}
func main() {
topics := make(map[string]uint64)
var nodes uint64Slice
inputFile := "test.out"
if len(os.Args) > 1 {
inputFile = os.Args[1]
}
f, _ := os.Open(inputFile)
scanner := bufio.NewScanner(f)
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
w := scanner.Text()
if w == "*N" {
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
nodes = append(nodes, prefix) // no single topicPrefix in this variant; sort by raw prefix
}
if w == "*R" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
if int(time) > maxTime {
maxTime = int(time)
}
scanner.Scan()
topic := scanner.Text()
if _, ok := topics[topic]; !ok {
fmt.Println(topic)
topicHash := crypto.Keccak256Hash([]byte(topic))
topics[topic] = binary.BigEndian.Uint64(topicHash[:8])
}
}
}
f.Close()
sort.Sort(nodes)
nodeIdx := make(map[uint64]int)
for i, v := range nodes {
nodeIdx[v] = i
}
xs = maxTime / 10000
ys = len(nodes)
pic := image.NewNRGBA(image.Rect(0, 0, xs, ys))
for y := 0; y < ys; y++ {
for x := 0; x < xs; x++ {
set(pic, x, y, 3, 255)
}
}
pic2 := image.NewNRGBA(image.Rect(0, 0, xs, ys))
for y := 0; y < ys; y++ {
for x := 0; x < xs; x++ {
set(pic2, x, y, 3, 255)
}
}
f, _ = os.Open(inputFile)
scanner = bufio.NewScanner(f)
scanner.Split(bufio.ScanWords)
nodeRad := make(map[uint64]int)
for scanner.Scan() {
w := scanner.Text()
if w == "*R" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
scanner.Scan()
rad, _ := strconv.ParseInt(scanner.Text(), 10, 64)
if int(rad) != nodeRad[prefix] {
nodeRad[prefix] = int(rad)
radUint := uint64(rad) * ((^uint64(0)) / 1000000)
x := int(time * int64(xs) / int64(maxTime))
y := sort.Search(ys, func(i int) bool {
return nodes[i] > radUint
})
set(pic, x, y, 1, 255)
}
}
if w == "*MR" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
scanner.Scan()
rad, _ := strconv.ParseInt(scanner.Text(), 10, 64)
radUint := uint64(rad) * ((^uint64(0)) / 1000000)
x := int(time * int64(xs) / int64(maxTime))
y := sort.Search(ys, func(i int) bool {
return nodes[i] > radUint
})
set(pic, x, y, 0, 255)
}
if w == "*W" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
scanner.Scan()
wp, _ := strconv.ParseInt(scanner.Text(), 10, 64)
x := int(time * int64(xs) / int64(maxTime))
y := nodeIdx[prefix]
set(pic2, x, y, 0, int(wp/100000))
set(pic2, x, y, 1, int(wp/10000))
set(pic2, x, y, 2, int(wp/1000))
}
if w == "*+" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
x := int(time * int64(xs) / int64(maxTime))
y := nodeIdx[prefix]
set(pic, x, y, 2, 255)
scanner.Scan()
}
}
f.Close()
f, _ = os.Create("test.png")
w := bufio.NewWriter(f)
png.Encode(w, pic)
w.Flush()
f.Close()
f, _ = os.Create("test2.png")
w = bufio.NewWriter(f)
png.Encode(w, pic2)
w.Flush()
f.Close()
}
type uint64Slice []uint64
// Len is the number of elements in the collection.
func (s uint64Slice) Len() int {
return len(s)
}
// Less reports whether the element with
// index i should sort before the element with index j.
func (s uint64Slice) Less(i, j int) bool {
return s[i] < s[j]
}
// Swap swaps the elements with indexes i and j.
func (s uint64Slice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}

View File

@ -0,0 +1,191 @@
package main
import (
"bufio"
"encoding/binary"
"fmt"
"image"
"image/png"
"os"
"sort"
"strconv"
"github.com/ethereum/go-ethereum/crypto"
)
var xs, ys, maxTime int
func set(pic *image.NRGBA, x, y, c, v int) {
if v > 255 {
v = 255
}
if x >= 0 && x < xs && y >= 0 && y < ys {
pic.Pix[y*pic.Stride+x*4+c] = uint8(v)
}
}
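// set writes one 8-bit channel of an NRGBA pixel in place: c selects
// R (0), G (1), B (2) or alpha (3), and Pix is indexed row-major with
// four bytes per pixel. The loops below keep alpha at 255 so the
// output images are fully opaque.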
func main() {
var nodes uint64Slice
inputFile := "test.out"
if len(os.Args) > 1 {
inputFile = os.Args[1]
}
topic := "foo"
if len(os.Args) > 2 {
topic = os.Args[2]
}
topicHash := crypto.Keccak256Hash([]byte(topic))
fmt.Println(topicHash)
topicPrefix := binary.BigEndian.Uint64(topicHash[:8])
f, _ := os.Open(inputFile)
scanner := bufio.NewScanner(f)
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
w := scanner.Text()
if w == "*N" {
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
nodes = append(nodes, prefix^topicPrefix)
}
if w == "*R" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
if int(time) > maxTime {
maxTime = int(time)
}
}
}
f.Close()
sort.Sort(nodes)
nodeIdx := make(map[uint64]int)
for i, v := range nodes {
nodeIdx[v^topicPrefix] = i
}
xs = maxTime / 10000
ys = len(nodes)
pic := image.NewNRGBA(image.Rect(0, 0, xs, ys))
for y := 0; y < ys; y++ {
for x := 0; x < xs; x++ {
set(pic, x, y, 3, 255)
}
}
pic2 := image.NewNRGBA(image.Rect(0, 0, xs, ys))
for y := 0; y < ys; y++ {
for x := 0; x < xs; x++ {
set(pic2, x, y, 3, 255)
}
}
f, _ = os.Open(inputFile)
scanner = bufio.NewScanner(f)
scanner.Split(bufio.ScanWords)
nodeRad := make(map[uint64]int)
for scanner.Scan() {
w := scanner.Text()
if w == "*R" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
if scanner.Text() == topic {
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
scanner.Scan()
rad, _ := strconv.ParseInt(scanner.Text(), 10, 64)
if int(rad) != nodeRad[prefix] {
nodeRad[prefix] = int(rad)
radUint := uint64(rad) * ((^uint64(0)) / 1000000)
x := int(time * int64(xs) / int64(maxTime))
y := sort.Search(ys, func(i int) bool {
return nodes[i] > radUint
})
set(pic, x, y, 1, 255)
}
}
}
if w == "*MR" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
if scanner.Text() == topic {
scanner.Scan()
scanner.Scan()
rad, _ := strconv.ParseInt(scanner.Text(), 10, 64)
radUint := uint64(rad) * ((^uint64(0)) / 1000000)
x := int(time * int64(xs) / int64(maxTime))
y := sort.Search(ys, func(i int) bool {
return nodes[i] > radUint
})
set(pic, x, y, 0, 255)
}
}
if w == "*W" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
if scanner.Text() == topic {
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
scanner.Scan()
wp, _ := strconv.ParseInt(scanner.Text(), 10, 64)
x := int(time * int64(xs) / int64(maxTime))
y := nodeIdx[prefix]
set(pic2, x, y, 0, int(wp/100000))
set(pic2, x, y, 1, int(wp/10000))
set(pic2, x, y, 2, int(wp/1000))
}
}
if w == "*+" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
if scanner.Text() == topic {
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
x := int(time * int64(xs) / int64(maxTime))
y := nodeIdx[prefix]
set(pic, x, y, 2, 255)
scanner.Scan()
}
}
}
f.Close()
f, _ = os.Create("test.png")
w := bufio.NewWriter(f)
png.Encode(w, pic)
w.Flush()
f.Close()
f, _ = os.Create("test2.png")
w = bufio.NewWriter(f)
png.Encode(w, pic2)
w.Flush()
f.Close()
}
type uint64Slice []uint64
// Len is the number of elements in the collection.
func (s uint64Slice) Len() int {
return len(s)
}
// Less reports whether the element with
// index i should sort before the element with index j.
func (s uint64Slice) Less(i, j int) bool {
return s[i] < s[j]
}
// Swap swaps the elements with indexes i and j.
func (s uint64Slice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
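For reference, the log records these tools consume, inferred from the parsers above and from the printTestImgLogs output in the topic table code further below (the concrete values here are made up for illustration):

*N 00a1b2c3d4e5f607
*R 152000000 foo 00a1b2c3d4e5f607 831
*W 152000000 foo 00a1b2c3d4e5f607 642000
*+ 153000000 foo 00a1b2c3d4e5f607 fedcba9876543210

*N announces a node by its sha prefix; *R is a radius update (time, topic, node, radius estimate), *W an issued wait period, and *+ a successful registration (time, topic, registrar, registrant).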

View File

@ -0,0 +1,324 @@
package main
import (
"bufio"
"encoding/binary"
"fmt"
"image"
"image/png"
"os"
"sort"
"strconv"
"github.com/ethereum/go-ethereum/crypto"
)
var xs, ys, maxTime int
func set(pic *image.NRGBA, x, y, c, v int) {
if v > 255 {
v = 255
}
if x >= 0 && x < xs && y >= 0 && y < ys {
pic.Pix[y*pic.Stride+x*4+c] = uint8(v)
}
}
type nodeStats []struct{ wpSum, wpCnt, wpXcnt, regCnt, regXcnt uint64 }
type nodeInfo struct {
maxMR int
topics map[string]struct{}
}
const (
regStatDiv = 60
regStatYdiv = 30
)
type topicInfo struct {
prefix uint64
nodes uint64Slice
nodeStats nodeStats
nodeIdx map[uint64]int
pic, pic2 *image.NRGBA
nodeRad map[uint64]int
regStats []int
}
func main() {
var nodes uint64Slice
topics := make(map[string]*topicInfo)
inputFile := "test.out"
if len(os.Args) > 1 {
inputFile = os.Args[1]
}
f, _ := os.Open(inputFile)
scanner := bufio.NewScanner(f)
scanner.Split(bufio.ScanWords)
for scanner.Scan() {
w := scanner.Text()
if w == "*N" {
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
nodes = append(nodes, prefix)
}
if w == "*R" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
if int(time) > maxTime {
maxTime = int(time)
}
scanner.Scan()
topic := scanner.Text()
if _, ok := topics[topic]; !ok {
fmt.Println(topic)
topicHash := crypto.Keccak256Hash([]byte(topic))
topics[topic] = &topicInfo{prefix: binary.BigEndian.Uint64(topicHash[:8])}
}
}
}
f.Close()
xs = maxTime / 10000
ys = len(nodes)
nodeIdx := make(map[uint64]int)
for i, v := range nodes {
nodeIdx[v] = i
}
nodeInfo := make([]nodeInfo, len(nodes))
for _, t := range topics {
t.nodes = make(uint64Slice, len(nodes))
t.nodeStats = make(nodeStats, len(nodes))
for i, v := range nodes {
t.nodes[i] = v ^ t.prefix
}
sort.Sort(t.nodes)
t.nodeIdx = make(map[uint64]int)
for i, v := range t.nodes {
t.nodeIdx[v^t.prefix] = i
}
t.pic = image.NewNRGBA(image.Rect(0, 0, xs, ys))
for y := 0; y < ys; y++ {
for x := 0; x < xs; x++ {
set(t.pic, x, y, 3, 255)
}
}
t.pic2 = image.NewNRGBA(image.Rect(0, 0, xs, ys))
for y := 0; y < ys; y++ {
for x := 0; x < xs; x++ {
set(t.pic2, x, y, 3, 255)
}
}
t.nodeRad = make(map[uint64]int)
t.regStats = make([]int, xs/regStatDiv+1)
}
f, _ = os.Open(inputFile)
scanner = bufio.NewScanner(f)
scanner.Split(bufio.ScanWords)
statBegin := int64(40000000)
statEnd := int64(maxTime - 10000000)
for scanner.Scan() {
w := scanner.Text()
if w == "*R" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
t := topics[scanner.Text()]
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
scanner.Scan()
rad, _ := strconv.ParseInt(scanner.Text(), 10, 64)
if int(rad) != t.nodeRad[prefix] {
t.nodeRad[prefix] = int(rad)
radUint := uint64(rad) * ((^uint64(0)) / 1000000)
x := int(time * int64(xs) / int64(maxTime))
y := sort.Search(ys, func(i int) bool {
return t.nodes[i] > radUint
})
set(t.pic, x, y, 1, 255)
}
}
if w == "*MR" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
topic := scanner.Text()
t := topics[topic]
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
scanner.Scan()
rad, _ := strconv.ParseInt(scanner.Text(), 10, 64)
radUint := uint64(rad) * ((^uint64(0)) / 1000000)
x := int(time * int64(xs) / int64(maxTime))
y := sort.Search(ys, func(i int) bool {
return t.nodes[i] > radUint
})
set(t.pic, x, y, 0, 255)
ni := nodeInfo[nodeIdx[prefix]]
if int(rad) > ni.maxMR {
ni.maxMR = int(rad)
if ni.topics == nil {
ni.topics = make(map[string]struct{})
}
ni.topics[topic] = struct{}{}
}
nodeInfo[nodeIdx[prefix]] = ni
}
if w == "*W" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
t := topics[scanner.Text()]
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
scanner.Scan()
wp, _ := strconv.ParseInt(scanner.Text(), 10, 64)
x := int(time * int64(xs) / int64(maxTime))
y := t.nodeIdx[prefix]
if time >= statBegin && time < statEnd {
t.nodeStats[y].wpSum += uint64(wp)
if wp >= 600000 {
t.nodeStats[y].wpXcnt++
}
t.nodeStats[y].wpCnt++
}
/*set(t.pic2, x, y, 0, int(wp/100000))
set(t.pic2, x, y, 1, int(wp/10000))
set(t.pic2, x, y, 2, int(wp/1000))*/
if wp >= 1800000 {
set(t.pic2, x, y, 0, 255)
}
if wp >= 600000 {
set(t.pic2, x, y, 1, 255)
}
if wp >= 60000 {
set(t.pic2, x, y, 2, 255)
}
}
if w == "*+" {
scanner.Scan()
time, _ := strconv.ParseInt(scanner.Text(), 10, 64)
scanner.Scan()
t := topics[scanner.Text()]
scanner.Scan()
prefix, _ := strconv.ParseUint(scanner.Text(), 16, 64)
x := int(time * int64(xs) / int64(maxTime))
if x < xs {
t.regStats[x/regStatDiv]++
}
y := t.nodeIdx[prefix]
set(t.pic, x, y, 2, 255)
scanner.Scan()
prefix2, _ := strconv.ParseUint(scanner.Text(), 16, 64)
y2 := t.nodeIdx[prefix2]
if time >= statBegin && time < statEnd {
t.nodeStats[y].regCnt++
t.nodeStats[y2].regXcnt++
}
}
}
f.Close()
for tt, t := range topics {
f, _ = os.Create("test_" + tt + ".png")
w := bufio.NewWriter(f)
png.Encode(w, t.pic)
w.Flush()
f.Close()
for x := 0; x < xs; x++ {
yy := t.regStats[x/regStatDiv] / regStatYdiv
if yy > ys {
yy = ys
}
for y := 0; y < yy; y++ {
set(t.pic2, x, ys-1-y, 1, 255)
}
}
f, _ = os.Create("test2_" + tt + ".png")
w = bufio.NewWriter(f)
png.Encode(w, t.pic2)
w.Flush()
f.Close()
if statEnd > statBegin {
xxs := len(t.nodeStats)
yys := 1000
yyh := yys / 2
pic3 := image.NewNRGBA(image.Rect(0, 0, xxs, yys))
for y := 0; y < yys; y++ {
for x := 0; x < xxs; x++ {
set(pic3, x, y, 3, 255)
}
}
for x := 0; x < xxs; x++ {
wpy := 0
if t.nodeStats[x].wpCnt > 0 {
// wpy = int(t.nodeStats[x].wpSum / t.nodeStats[x].wpCnt / 10000)
wpy = int(uint64(yyh) * t.nodeStats[x].wpXcnt / t.nodeStats[x].wpCnt)
}
if wpy > yyh {
wpy = yyh
}
for y := 0; y < wpy; y++ {
set(pic3, x, yys-1-y, 1, 255)
}
regy := int(t.nodeStats[x].regCnt * 2400000 / uint64(statEnd-statBegin))
if regy > yyh {
regy = yyh
}
for y := 0; y < regy; y++ {
set(pic3, x, yyh-1-y, 2, 255)
}
regy2 := int(t.nodeStats[x].regXcnt * 2400000 / uint64(statEnd-statBegin))
if regy2 > yyh {
regy2 = yyh
}
for y := 0; y < regy2; y++ {
set(pic3, x, yyh-1-y, 0, 255)
}
}
f, _ = os.Create("test3_" + tt + ".png")
w = bufio.NewWriter(f)
png.Encode(w, pic3)
w.Flush()
f.Close()
}
}
for i, ni := range nodeInfo {
fmt.Printf("%d %016x maxMR = %d ", i, nodes[i], ni.maxMR)
for t := range ni.topics {
fmt.Printf(" %s", t)
}
fmt.Println()
}
}
type uint64Slice []uint64
// Len is the number of elements in the collection.
func (s uint64Slice) Len() int {
return len(s)
}
// Less reports whether the element with
// index i should sort before the element with index j.
func (s uint64Slice) Less(i, j int) bool {
return s[i] < s[j]
}
// Swap swaps the elements with indexes i and j.
func (s uint64Slice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}

View File

@ -0,0 +1,684 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import (
"bytes"
"encoding/binary"
"fmt"
"math/rand"
"sort"
"time"
"github.com/aristanetworks/goarista/atime"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
const (
ticketTimeBucketLen = time.Minute
timeWindow = 10 // * ticketTimeBucketLen
wantTicketsInWindow = 10
collectFrequency = time.Second * 30
registerFrequency = time.Second * 60
maxCollectDebt = 10
maxRegisterDebt = 5
keepTicketConst = time.Minute * 10
keepTicketExp = time.Minute * 5
maxRadius = 0xffffffffffffffff
minRadAverage = 100
minRadStableAfter = 50
targetWaitTime = time.Minute * 10
adjustRatio = 0.001
adjustCooldownStart = 0.1
adjustCooldownStep = 0.01
radiusExtendRatio = 1.5
)
// absTime represents absolute monotonic time in nanoseconds.
type absTime time.Duration
func monotonicTime() absTime {
return absTime(atime.NanoTime())
}
// timeBucket represents absolute monotonic time in minutes.
// It is used as the index into the per-topic ticket buckets.
type timeBucket int
type ticket struct {
topics []Topic
regTime []absTime // Per-topic local absolute time when the ticket can be used.
// The serial number that was issued by the server.
serial uint32
// Used by registrar, tracks absolute time when the ticket was created.
issueTime absTime
// Fields used only by registrants
node *Node // the registrar node that signed this ticket
refCnt int // tracks number of topics that will be registered using this ticket
pong []byte // encoded pong packet signed by the registrar
}
// ticketRef refers to a single topic in a ticket.
type ticketRef struct {
t *ticket
idx int // index of the topic in t.topics and t.regTime
}
func (ref ticketRef) topic() Topic {
return ref.t.topics[ref.idx]
}
func (ref ticketRef) topicRegTime() absTime {
return ref.t.regTime[ref.idx]
}
func pongToTicket(localTime absTime, topics []Topic, node *Node, p *ingressPacket) (*ticket, error) {
wps := p.data.(*pong).WaitPeriods
if len(topics) != len(wps) {
return nil, fmt.Errorf("bad wait period list: got %d values, want %d", len(topics), len(wps))
}
if rlpHash(topics) != p.data.(*pong).TopicHash {
return nil, fmt.Errorf("bad topic hash")
}
t := &ticket{
issueTime: localTime,
node: node,
topics: topics,
pong: p.rawData,
regTime: make([]absTime, len(wps)),
}
// Convert wait periods to local absolute time.
for i, wp := range wps {
t.regTime[i] = localTime + absTime(time.Second*time.Duration(wp))
}
return t, nil
}
func ticketToPong(t *ticket, pong *pong) {
pong.Expiration = uint64(t.issueTime / absTime(time.Second))
pong.TopicHash = rlpHash(t.topics)
pong.TicketSerial = t.serial
pong.WaitPeriods = make([]uint32, len(t.regTime))
for i, regTime := range t.regTime {
pong.WaitPeriods[i] = uint32(time.Duration(regTime-t.issueTime) / time.Second)
}
}
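// Round-trip example: a pong advertising WaitPeriods = [30, 600] yields
// a ticket with regTime[i] = localTime + 30s and localTime + 10min;
// ticketToPong then recovers the same wait periods from regTime - issueTime.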
type ticketStore struct {
// radius detector and target address generator
// exists for both searched and registered topics
radius map[Topic]*topicRadius
// Contains buckets (for each absolute minute) of tickets
// that can be used in that minute.
// This is only set if the topic is being registered.
tickets map[Topic]topicTickets
regtopics []Topic
nodes map[*Node]*ticket
nodeLastReq map[*Node]reqInfo
lastBucketFetched timeBucket
nextTicketCached *ticketRef
nextTicketReg absTime
minRadCnt, minRadPtr uint64
minRadius, minRadSum uint64
lastMinRads [minRadAverage]uint64
}
type topicTickets struct {
buckets map[timeBucket][]ticketRef
nextLookup, nextReg absTime
}
func newTicketStore() *ticketStore {
return &ticketStore{
radius: make(map[Topic]*topicRadius),
tickets: make(map[Topic]topicTickets),
nodes: make(map[*Node]*ticket),
nodeLastReq: make(map[*Node]reqInfo),
}
}
// addTopic starts tracking a topic. If register is true,
// the local node will register the topic and tickets will be collected.
// It can be called even if the topic is already being tracked.
func (s *ticketStore) addTopic(t Topic, register bool) {
debugLog(fmt.Sprintf(" addTopic(%v, %v)", t, register))
if s.radius[t] == nil {
s.radius[t] = newTopicRadius(t)
}
if register && s.tickets[t].buckets == nil {
s.tickets[t] = topicTickets{buckets: make(map[timeBucket][]ticketRef)}
}
}
// removeRegisterTopic deletes all tickets for the given topic.
func (s *ticketStore) removeRegisterTopic(topic Topic) {
debugLog(fmt.Sprintf(" removeRegisterTopic(%v)", topic))
for _, list := range s.tickets[topic].buckets {
for _, ref := range list {
ref.t.refCnt--
if ref.t.refCnt == 0 {
delete(s.nodes, ref.t.node)
delete(s.nodeLastReq, ref.t.node)
}
}
}
delete(s.tickets, topic)
}
func (s *ticketStore) regTopicSet() []Topic {
topics := make([]Topic, 0, len(s.tickets))
for topic := range s.tickets {
topics = append(topics, topic)
}
return topics
}
// nextRegisterLookup returns the target of the next lookup for ticket collection.
func (s *ticketStore) nextRegisterLookup() (lookup lookupInfo, delay time.Duration) {
debugLog("nextRegisterLookup()")
firstTopic, ok := s.iterRegTopics()
for topic := firstTopic; ok; {
debugLog(fmt.Sprintf(" checking topic %v, len(s.tickets[topic]) = %d", topic, len(s.tickets[topic].buckets)))
if s.tickets[topic].buckets != nil && s.needMoreTickets(topic) {
next := s.radius[topic].nextTarget()
debugLog(fmt.Sprintf(" %x 1s", next[:8]))
return lookupInfo{target: next, topic: topic}, 1 * time.Second
}
topic, ok = s.iterRegTopics()
if topic == firstTopic {
break // We have checked all topics.
}
}
debugLog(" null, 40s")
return lookupInfo{}, 40 * time.Second
}
// iterRegTopics returns topics to register in arbitrary order.
// The second return value is false if there are no topics.
func (s *ticketStore) iterRegTopics() (Topic, bool) {
debugLog("iterRegTopics()")
if len(s.regtopics) == 0 {
if len(s.tickets) == 0 {
debugLog(" false")
return "", false
}
// Refill register list.
for t := range s.tickets {
s.regtopics = append(s.regtopics, t)
}
}
topic := s.regtopics[len(s.regtopics)-1]
s.regtopics = s.regtopics[:len(s.regtopics)-1]
debugLog(" " + string(topic) + " true")
return topic, true
}
func (s *ticketStore) needMoreTickets(t Topic) bool {
return s.tickets[t].nextLookup < monotonicTime()
}
// ticketsInWindow returns the tickets of a given topic in the registration window.
func (s *ticketStore) ticketsInWindow(t Topic) []ticketRef {
ltBucket := s.lastBucketFetched
var res []ticketRef
tickets := s.tickets[t].buckets
for g := ltBucket; g < ltBucket+timeWindow; g++ {
res = append(res, tickets[g]...)
}
debugLog(fmt.Sprintf("ticketsInWindow(%v) = %v", t, len(res)))
return res
}
func (s *ticketStore) removeExcessTickets(t Topic) {
tickets := s.ticketsInWindow(t)
if len(tickets) <= wantTicketsInWindow {
return
}
sort.Sort(ticketRefByWaitTime(tickets))
for _, r := range tickets[wantTicketsInWindow:] {
s.removeTicketRef(r)
}
}
type ticketRefByWaitTime []ticketRef
// Len is the number of elements in the collection.
func (s ticketRefByWaitTime) Len() int {
return len(s)
}
func (r ticketRef) waitTime() absTime {
return r.t.regTime[r.idx] - r.t.issueTime
}
// Less reports whether the element with
// index i should sort before the element with index j.
func (s ticketRefByWaitTime) Less(i, j int) bool {
return s[i].waitTime() < s[j].waitTime()
}
// Swap swaps the elements with indexes i and j.
func (s ticketRefByWaitTime) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s *ticketStore) addTicketRef(r ticketRef) {
topic := r.t.topics[r.idx]
t := s.tickets[topic]
if t.buckets == nil {
return
}
bucket := timeBucket(r.t.regTime[r.idx] / absTime(ticketTimeBucketLen))
t.buckets[bucket] = append(t.buckets[bucket], r)
r.t.refCnt++
min := monotonicTime() - absTime(collectFrequency)*maxCollectDebt
if t.nextLookup < min {
t.nextLookup = min
}
t.nextLookup += absTime(collectFrequency)
s.tickets[topic] = t
//s.removeExcessTickets(topic)
}
func (s *ticketStore) nextFilteredTicket() (t *ticketRef, wait time.Duration) {
now := monotonicTime()
for {
t, wait = s.nextRegisterableTicket()
if t == nil {
return
}
regTime := now + absTime(wait)
topic := t.t.topics[t.idx]
if regTime >= s.tickets[topic].nextReg {
return
}
s.removeTicketRef(*t)
}
}
func (s *ticketStore) ticketRegistered(t ticketRef) {
now := monotonicTime()
topic := t.t.topics[t.idx]
tt := s.tickets[topic]
min := now - absTime(registerFrequency)*maxRegisterDebt
if min > tt.nextReg {
tt.nextReg = min
}
tt.nextReg += absTime(registerFrequency)
s.tickets[topic] = tt
s.removeTicketRef(t)
}
// nextRegisterableTicket returns the next ticket that can be used
// to register.
//
// If the returned wait time <= zero the ticket can be used. For a positive
// wait time, the caller should requery the next ticket later.
//
// A ticket can be returned more than once with <= zero wait time in case
// the ticket contains multiple topics.
func (s *ticketStore) nextRegisterableTicket() (t *ticketRef, wait time.Duration) {
defer func() {
if t == nil {
debugLog(" nil")
} else {
debugLog(fmt.Sprintf(" node = %x sn = %v wait = %v", t.t.node.ID[:8], t.t.serial, wait))
}
}()
debugLog("nextRegisterableTicket()")
now := monotonicTime()
if s.nextTicketCached != nil {
return s.nextTicketCached, time.Duration(s.nextTicketCached.topicRegTime() - now)
}
for bucket := s.lastBucketFetched; ; bucket++ {
var (
empty = true // true if there are no tickets
nextTicket ticketRef // uninitialized if this bucket is empty
)
for _, tickets := range s.tickets {
//s.removeExcessTickets(topic)
if len(tickets.buckets) != 0 {
empty = false
if list := tickets.buckets[bucket]; list != nil {
for _, ref := range list {
//debugLog(fmt.Sprintf(" nrt bucket = %d node = %x sn = %v wait = %v", bucket, ref.t.node.ID[:8], ref.t.serial, time.Duration(ref.topicRegTime()-now)))
if nextTicket.t == nil || ref.topicRegTime() < nextTicket.topicRegTime() {
nextTicket = ref
}
}
}
}
}
if empty {
return nil, 0
}
if nextTicket.t != nil {
wait = time.Duration(nextTicket.topicRegTime() - now)
s.nextTicketCached = &nextTicket
return &nextTicket, wait
}
s.lastBucketFetched = bucket
}
}
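// Sketch of a typical caller loop (editor's illustration; register is a
// hypothetical stand-in for sending the topicRegister packet):
//
//	for {
//		t, wait := s.nextFilteredTicket()
//		if t == nil {
//			break
//		}
//		time.Sleep(wait)       // wait until the ticket becomes usable
//		register(*t)           // send topicRegister with t.t.pong
//		s.ticketRegistered(*t) // schedule the topic's next registration
//	}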
// removeTicketRef removes a ticket reference from the ticket store
func (s *ticketStore) removeTicketRef(ref ticketRef) {
debugLog(fmt.Sprintf("removeTicketRef(node = %x sn = %v)", ref.t.node.ID[:8], ref.t.serial))
topic := ref.topic()
tickets := s.tickets[topic].buckets
if tickets == nil {
return
}
bucket := timeBucket(ref.t.regTime[ref.idx] / absTime(ticketTimeBucketLen))
list := tickets[bucket]
idx := -1
for i, bt := range list {
if bt.t == ref.t {
idx = i
break
}
}
if idx == -1 {
panic("ticket not found in bucket")
}
list = append(list[:idx], list[idx+1:]...)
if len(list) != 0 {
tickets[bucket] = list
} else {
delete(tickets, bucket)
}
ref.t.refCnt--
if ref.t.refCnt == 0 {
delete(s.nodes, ref.t.node)
delete(s.nodeLastReq, ref.t.node)
}
// Make nextRegisterableTicket return the next available ticket.
s.nextTicketCached = nil
}
type lookupInfo struct {
target common.Hash
topic Topic
}
type reqInfo struct {
pingHash []byte
topic Topic
}
// findIdx returns the index of topic in t.topics, or -1 if not found.
func (t *ticket) findIdx(topic Topic) int {
for i, tt := range t.topics {
if tt == topic {
return i
}
}
return -1
}
func (s *ticketStore) registerLookupDone(lookup lookupInfo, nodes []*Node, ping func(n *Node) []byte) {
now := monotonicTime()
//fmt.Printf("registerLookupDone target = %016x\n", target[:8])
if len(nodes) > 0 {
s.adjustMinRadius(lookup.target, nodes[0].sha)
}
for i, n := range nodes {
if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.minRadius {
if t := s.nodes[n]; t != nil {
// adjust radius with already stored ticket
if idx := t.findIdx(lookup.topic); idx != -1 {
s.adjustWithTicket(now, t, idx, false)
}
} else {
// request a new pong packet
s.nodeLastReq[n] = reqInfo{pingHash: ping(n), topic: lookup.topic}
}
}
}
}
func (s *ticketStore) adjustWithTicket(localTime absTime, t *ticket, idx int, onlyConverging bool) {
if onlyConverging {
for i, topic := range t.topics {
if tt, ok := s.radius[topic]; ok && !tt.converged && tt.isInRadius(t, true) {
tt.adjust(localTime, ticketRef{t, i}, s.minRadius, s.minRadCnt >= minRadStableAfter)
debugLog(fmt.Sprintf("adjust converging topic: %v, rad: %v, cd: %v, converged: %v", topic, float64(tt.radius)/maxRadius, tt.adjustCooldown, tt.converged))
}
}
} else {
topic := t.topics[idx]
if tt, ok := s.radius[topic]; ok && tt.isInRadius(t, true) {
tt.adjust(localTime, ticketRef{t, idx}, s.minRadius, s.minRadCnt >= minRadStableAfter)
debugLog(fmt.Sprintf("adjust topic: %v, rad: %v, cd: %v, converged: %v", topic, float64(tt.radius)/maxRadius, tt.adjustCooldown, tt.converged))
}
}
}
func (s *ticketStore) addTicket(localTime absTime, pingHash []byte, t *ticket) {
debugLog(fmt.Sprintf("add(node = %x sn = %v)", t.node.ID[:8], t.serial))
if s.nodes[t.node] != nil {
return
}
lastReq, ok := s.nodeLastReq[t.node]
if !(ok && bytes.Equal(pingHash, lastReq.pingHash)) {
s.adjustWithTicket(localTime, t, -1, true)
return
}
topic := lastReq.topic
topicIdx := t.findIdx(topic)
if topicIdx == -1 {
return
}
s.adjustWithTicket(localTime, t, topicIdx, false)
bucket := timeBucket(localTime / absTime(ticketTimeBucketLen))
if s.lastBucketFetched == 0 || bucket < s.lastBucketFetched {
s.lastBucketFetched = bucket
}
for topicIdx, topic := range t.topics {
if tt, ok := s.radius[topic]; ok && tt.isInRadius(t, false) {
if _, ok := s.tickets[topic]; ok && tt.converged {
wait := t.regTime[topicIdx] - localTime
rnd := rand.ExpFloat64()
if rnd > 10 {
rnd = 10
}
if float64(wait) < float64(keepTicketConst)+float64(keepTicketExp)*rnd {
// use the ticket to register this topic
s.addTicketRef(ticketRef{t, topicIdx})
}
}
}
}
if t.refCnt > 0 {
s.nextTicketCached = nil
s.nodes[t.node] = t
}
}
func (s *ticketStore) getNodeTicket(node *Node) *ticket {
if s.nodes[node] == nil {
debugLog(fmt.Sprintf("getNodeTicket(%x) sn = nil", node.ID[:8]))
} else {
debugLog(fmt.Sprintf("getNodeTicket(%x) sn = %v", node.ID[:8], s.nodes[node].serial))
}
return s.nodes[node]
}
func (s *ticketStore) adjustMinRadius(target, found common.Hash) {
tp := binary.BigEndian.Uint64(target[0:8])
fp := binary.BigEndian.Uint64(found[0:8])
dist := tp ^ fp
var mr uint64
if dist < maxRadius/16 {
mr = dist * 16
} else {
mr = maxRadius
}
mr /= minRadAverage
s.minRadSum -= s.lastMinRads[s.minRadPtr]
s.lastMinRads[s.minRadPtr] = mr
s.minRadSum += mr
s.minRadPtr++
if s.minRadPtr == minRadAverage {
s.minRadPtr = 0
}
s.minRadCnt++
if s.minRadCnt < minRadAverage {
s.minRadius = (s.minRadSum / s.minRadCnt) * minRadAverage
} else {
s.minRadius = s.minRadSum
}
debugLog(fmt.Sprintf("adjustMinRadius() %v", float64(s.minRadius)/maxRadius))
}
type topicRadius struct {
topic Topic
topicHashPrefix uint64
radius uint64
adjustCooldown float64 // only for convergence detection
converged bool
intExtBalance float64
}
func newTopicRadius(t Topic) *topicRadius {
topicHash := crypto.Keccak256Hash([]byte(t))
topicHashPrefix := binary.BigEndian.Uint64(topicHash[0:8])
return &topicRadius{
topic: t,
topicHashPrefix: topicHashPrefix,
radius: maxRadius,
adjustCooldown: adjustCooldownStart,
converged: false,
}
}
func (r *topicRadius) isInRadius(t *ticket, extRadius bool) bool {
nodePrefix := binary.BigEndian.Uint64(t.node.sha[0:8])
dist := nodePrefix ^ r.topicHashPrefix
if extRadius {
return float64(dist) < float64(r.radius)*radiusExtendRatio
}
return dist < r.radius
}
func randUint64n(n uint64) uint64 { // don't care about lowest bit, 63 bit randomness is more than enough
if n < 4 {
return 0
}
return uint64(rand.Int63n(int64(n/2))) * 2
}
func (r *topicRadius) nextTarget() common.Hash {
var rnd uint64
if r.intExtBalance < 0 {
// select target from inner region
rnd = randUint64n(r.radius)
} else {
// select target from outer region
e := float64(r.radius) * radiusExtendRatio
extRadius := uint64(maxRadius)
if e < maxRadius {
extRadius = uint64(e)
}
rnd = r.radius + randUint64n(extRadius-r.radius)
}
prefix := r.topicHashPrefix ^ rnd
var target common.Hash
binary.BigEndian.PutUint64(target[0:8], prefix)
return target
}
func (r *topicRadius) adjust(localTime absTime, t ticketRef, minRadius uint64, minRadStable bool) {
var balanceStep, stepSign float64
if r.isInRadius(t.t, false) {
balanceStep = radiusExtendRatio - 1
stepSign = 1
} else {
balanceStep = -1
stepSign = -1
}
if r.intExtBalance*stepSign > 3 {
return
}
r.intExtBalance += balanceStep
wait := t.t.regTime[t.idx] - t.t.issueTime // localTime
adjust := (float64(wait)/float64(targetWaitTime) - 1) * 2
if adjust > 1 {
adjust = 1
}
if adjust < -1 {
adjust = -1
}
/*var adjust float64
if wait > absTime(targetWaitTime) {
adjust = 1
} else {
adjust = -1
}*/
if r.converged {
adjust *= adjustRatio
} else {
adjust *= r.adjustCooldown
}
/*if adjust > 0 {
adjust *= radiusExtendRatio*2 - 1
}*/
radius := float64(r.radius) * (1 + adjust)
if radius > float64(maxRadius) {
r.radius = maxRadius
} else {
r.radius = uint64(radius)
if r.radius < minRadius {
r.radius = minRadius
}
}
if !r.converged && (adjust > 0 || (r.radius == minRadius && minRadStable)) {
r.adjustCooldown *= (1 - adjustCooldownStep)
if r.adjustCooldown <= adjustRatio {
r.converged = true
}
}
}
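// Worked numbers for the adjustment step above, with targetWaitTime = 10min:
// a ticket that waited 5min gives adjust = (0.5-1)*2 = -1 (maximal shrink),
// one that waited 15min gives (1.5-1)*2 = +1 (maximal growth); after
// convergence each step is further scaled by adjustRatio (0.001), so the
// radius drifts by at most 0.1% per adjusted ticket.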

View File

@ -0,0 +1,399 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import (
"container/heap"
"fmt"
"math"
"math/rand"
"time"
)
const (
maxEntries = 10000
maxEntriesPerTopic = 50
fallbackRegistrationExpiry = 1 * time.Hour
)
type Topic string
type topicEntry struct {
topic Topic
fifoIdx uint64
node *Node
expire absTime
}
type topicInfo struct {
entries map[uint64]*topicEntry
fifoHead, fifoTail uint64
rqItem *topicRequestQueueItem
wcl waitControlLoop
}
// getFifoTail removes and returns the tail element of the fifo
func (t *topicInfo) getFifoTail() *topicEntry {
for t.entries[t.fifoTail] == nil {
t.fifoTail++
}
tail := t.entries[t.fifoTail]
t.fifoTail++
return tail
}
type nodeInfo struct {
entries map[Topic]*topicEntry
lastIssuedTicket, lastUsedTicket uint32
// you can't register a ticket newer than lastUsedTicket before noRegUntil (absolute time)
noRegUntil absTime
}
type topicTable struct {
db *nodeDB
self *Node
nodes map[*Node]*nodeInfo
topics map[Topic]*topicInfo
globalEntries uint64
requested topicRequestQueue
requestCnt uint64
lastGarbageCollection absTime
}
func newTopicTable(db *nodeDB, self *Node) *topicTable {
if printTestImgLogs {
fmt.Printf("*N %016x\n", self.sha[:8])
}
return &topicTable{
db: db,
nodes: make(map[*Node]*nodeInfo),
topics: make(map[Topic]*topicInfo),
self: self,
}
}
func (t *topicTable) getOrNewTopic(topic Topic) *topicInfo {
ti := t.topics[topic]
if ti == nil {
rqItem := &topicRequestQueueItem{
topic: topic,
priority: t.requestCnt,
}
ti = &topicInfo{
entries: make(map[uint64]*topicEntry),
rqItem: rqItem,
}
t.topics[topic] = ti
heap.Push(&t.requested, rqItem)
}
return ti
}
func (t *topicTable) checkDeleteTopic(topic Topic) {
ti := t.topics[topic]
if ti == nil {
return
}
if len(ti.entries) == 0 && ti.wcl.hasMinimumWaitPeriod() {
delete(t.topics, topic)
heap.Remove(&t.requested, ti.rqItem.index)
}
}
func (t *topicTable) getOrNewNode(node *Node) *nodeInfo {
n := t.nodes[node]
if n == nil {
//fmt.Printf("newNode %016x %016x\n", t.self.sha[:8], node.sha[:8])
var issued, used uint32
if t.db != nil {
issued, used = t.db.fetchTopicRegTickets(node.ID)
}
n = &nodeInfo{
entries: make(map[Topic]*topicEntry),
lastIssuedTicket: issued,
lastUsedTicket: used,
}
t.nodes[node] = n
}
return n
}
func (t *topicTable) checkDeleteNode(node *Node) {
if n, ok := t.nodes[node]; ok && len(n.entries) == 0 && n.noRegUntil < monotonicTime() {
//fmt.Printf("deleteNode %016x %016x\n", t.self.sha[:8], node.sha[:8])
delete(t.nodes, node)
}
}
func (t *topicTable) storeTicketCounters(node *Node) {
n := t.getOrNewNode(node)
if t.db != nil {
t.db.updateTopicRegTickets(node.ID, n.lastIssuedTicket, n.lastUsedTicket)
}
}
func (t *topicTable) getEntries(topic Topic) []*Node {
t.collectGarbage()
te := t.topics[topic]
if te == nil {
return nil
}
nodes := make([]*Node, len(te.entries))
i := 0
for _, e := range te.entries {
nodes[i] = e.node
i++
}
t.requestCnt++
t.requested.update(te.rqItem, t.requestCnt)
return nodes
}
func (t *topicTable) addEntry(node *Node, topic Topic) {
n := t.getOrNewNode(node)
// clear previous entries by the same node
for _, e := range n.entries {
t.deleteEntry(e)
}
// ***
n = t.getOrNewNode(node)
tm := monotonicTime()
te := t.getOrNewTopic(topic)
if len(te.entries) == maxEntriesPerTopic {
t.deleteEntry(te.getFifoTail())
}
if t.globalEntries == maxEntries {
t.deleteEntry(t.leastRequested()) // not empty, no need to check for nil
}
fifoIdx := te.fifoHead
te.fifoHead++
entry := &topicEntry{
topic: topic,
fifoIdx: fifoIdx,
node: node,
expire: tm + absTime(fallbackRegistrationExpiry),
}
if printTestImgLogs {
fmt.Printf("*+ %d %v %016x %016x\n", tm/1000000, topic, t.self.sha[:8], node.sha[:8])
}
te.entries[fifoIdx] = entry
n.entries[topic] = entry
t.globalEntries++
te.wcl.registered(tm)
}
// leastRequested returns the fifo tail of the least recently requested topic
func (t *topicTable) leastRequested() *topicEntry {
for t.requested.Len() > 0 && t.topics[t.requested[0].topic] == nil {
heap.Pop(&t.requested)
}
if t.requested.Len() == 0 {
return nil
}
return t.topics[t.requested[0].topic].getFifoTail()
}
// deleteEntry removes the given entry; the entry must exist
func (t *topicTable) deleteEntry(e *topicEntry) {
if printTestImgLogs {
fmt.Printf("*- %d %v %016x %016x\n", monotonicTime()/1000000, e.topic, t.self.sha[:8], e.node.sha[:8])
}
ne := t.nodes[e.node].entries
delete(ne, e.topic)
if len(ne) == 0 {
t.checkDeleteNode(e.node)
}
te := t.topics[e.topic]
delete(te.entries, e.fifoIdx)
if len(te.entries) == 0 {
t.checkDeleteTopic(e.topic)
}
t.globalEntries--
}
// It is assumed that topics and waitPeriods have the same length.
func (t *topicTable) useTicket(node *Node, serialNo uint32, topics []Topic, idx int, issueTime uint64, waitPeriods []uint32) (registered bool) {
debugLog(fmt.Sprintf("useTicket", serialNo, topics, waitPeriods))
t.collectGarbage()
n := t.getOrNewNode(node)
if serialNo < n.lastUsedTicket {
return false
}
tm := monotonicTime()
if serialNo > n.lastUsedTicket && tm < n.noRegUntil {
return false
}
if serialNo != n.lastUsedTicket {
n.lastUsedTicket = serialNo
n.noRegUntil = tm + absTime(noRegTimeout())
t.storeTicketCounters(node)
}
currTime := uint64(tm / absTime(time.Second))
regTime := issueTime + uint64(waitPeriods[idx])
relTime := int64(currTime - regTime)
if relTime >= -1 && relTime <= regTimeWindow+1 && // give clients a little security margin on both ends
n.entries[topics[idx]] == nil { // don't register again if there is an active entry
t.addEntry(node, topics[idx])
return true
}
return false
}
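// Example of the acceptance window above: a ticket issued at t = 100s with
// waitPeriods[idx] = 40 is accepted from t = 139s through t = 151s
// (regTime-1 .. regTime+regTimeWindow+1), the extra second on each side
// being the security margin for clock granularity.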
func (topictab *topicTable) getTicket(node *Node, topics []Topic) *ticket {
topictab.collectGarbage()
now := monotonicTime()
n := topictab.getOrNewNode(node)
n.lastIssuedTicket++
topictab.storeTicketCounters(node)
t := &ticket{
issueTime: now,
topics: topics,
serial: n.lastIssuedTicket,
regTime: make([]absTime, len(topics)),
}
for i, topic := range topics {
var waitPeriod time.Duration
if topic := topictab.topics[topic]; topic != nil {
waitPeriod = topic.wcl.waitPeriod
} else {
waitPeriod = minWaitPeriod
}
t.regTime[i] = now + absTime(waitPeriod)
}
return t
}
const gcInterval = time.Minute
func (t *topicTable) collectGarbage() {
tm := monotonicTime()
if time.Duration(tm-t.lastGarbageCollection) < gcInterval {
return
}
t.lastGarbageCollection = tm
for node, n := range t.nodes {
for _, e := range n.entries {
if e.expire <= tm {
t.deleteEntry(e)
}
}
t.checkDeleteNode(node)
}
for topic := range t.topics {
t.checkDeleteTopic(topic)
}
}
const (
minWaitPeriod = time.Minute
regTimeWindow = 10 // seconds
avgnoRegTimeout = time.Minute * 10
// target average interval between two incoming ad requests
wcTargetRegInterval = time.Minute * 10 / maxEntriesPerTopic
// time constant of the exponential wait-period adjustment
wcTimeConst = time.Minute * 10
)
// waitControlLoop needs no initialization; the first registration sets waitPeriod to minWaitPeriod
type waitControlLoop struct {
lastIncoming absTime
waitPeriod time.Duration
}
func (w *waitControlLoop) registered(tm absTime) {
w.waitPeriod = w.nextWaitPeriod(tm)
w.lastIncoming = tm
}
func (w *waitControlLoop) nextWaitPeriod(tm absTime) time.Duration {
period := tm - w.lastIncoming
wp := time.Duration(float64(w.waitPeriod) * math.Exp((float64(wcTargetRegInterval)-float64(period))/float64(wcTimeConst)))
if wp < minWaitPeriod {
wp = minWaitPeriod
}
return wp
}
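// Rough numbers under the constants above (target interval = 10min/50 = 12s,
// time constant = 10min): a registration arriving exactly on target leaves
// waitPeriod unchanged (factor e^0); back-to-back registrations grow it by
// about 2% each (e^(12s/600s)); a topic quiet for 10 minutes shrinks it to
// about 38% (e^(-588s/600s)), decaying toward minWaitPeriod.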
func (w *waitControlLoop) hasMinimumWaitPeriod() bool {
return w.nextWaitPeriod(monotonicTime()) == minWaitPeriod
}
func noRegTimeout() time.Duration {
e := rand.ExpFloat64()
if e > 100 {
e = 100
}
return time.Duration(float64(avgnoRegTimeout) * e)
}
type topicRequestQueueItem struct {
topic Topic
priority uint64
index int
}
// A topicRequestQueue implements heap.Interface and holds topicRequestQueueItems.
type topicRequestQueue []*topicRequestQueueItem
func (tq topicRequestQueue) Len() int { return len(tq) }
func (tq topicRequestQueue) Less(i, j int) bool {
return tq[i].priority < tq[j].priority
}
func (tq topicRequestQueue) Swap(i, j int) {
tq[i], tq[j] = tq[j], tq[i]
tq[i].index = i
tq[j].index = j
}
func (tq *topicRequestQueue) Push(x interface{}) {
n := len(*tq)
item := x.(*topicRequestQueueItem)
item.index = n
*tq = append(*tq, item)
}
func (tq *topicRequestQueue) Pop() interface{} {
old := *tq
n := len(old)
item := old[n-1]
item.index = -1
*tq = old[0 : n-1]
return item
}
func (tq *topicRequestQueue) update(item *topicRequestQueueItem, priority uint64) {
item.priority = priority
heap.Fix(tq, item.index)
}

View File

@ -0,0 +1,449 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import (
"bytes"
"crypto/ecdsa"
"errors"
"fmt"
"net"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/rlp"
)
const Version = 4
// Errors
var (
errPacketTooSmall = errors.New("too small")
errBadHash = errors.New("bad hash")
errExpired = errors.New("expired")
errUnsolicitedReply = errors.New("unsolicited reply")
errUnknownNode = errors.New("unknown node")
errTimeout = errors.New("RPC timeout")
errClockWarp = errors.New("reply deadline too far in the future")
errClosed = errors.New("socket closed")
)
// Timeouts
const (
respTimeout = 500 * time.Millisecond
sendTimeout = 500 * time.Millisecond
expiration = 20 * time.Second
ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP
ntpWarningCooldown = 10 * time.Minute // Minimum amount of time to pass before repeating NTP warning
driftThreshold = 10 * time.Second // Allowed clock drift before warning user
)
// RPC request structures
type (
ping struct {
Version uint
From, To rpcEndpoint
Expiration uint64
// v5
Topics []Topic
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// pong is the reply to ping.
pong struct {
// This field should mirror the UDP envelope address
// of the ping packet, which provides a way to discover
// the external address (after NAT).
To rpcEndpoint
ReplyTok []byte // This contains the hash of the ping packet.
Expiration uint64 // Absolute timestamp at which the packet becomes invalid.
// v5
TopicHash common.Hash
TicketSerial uint32
WaitPeriods []uint32
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// findnode is a query for nodes close to the given target.
findnode struct {
Target NodeID // doesn't need to be an actual public key
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// findnodeHash is a query for nodes close to the given target hash.
findnodeHash struct {
Target common.Hash
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// reply to findnode
neighbors struct {
Nodes []rpcNode
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
topicRegister struct {
Topics []Topic
Idx int
Pong []byte
}
topicQuery struct {
Topic Topic
Expiration uint64
}
// reply to topicQuery
topicNodes struct {
Echo common.Hash
Nodes []rpcNode
}
rpcNode struct {
IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP uint16 // for discovery protocol
TCP uint16 // for RLPx protocol
ID NodeID
}
rpcEndpoint struct {
IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP uint16 // for discovery protocol
TCP uint16 // for RLPx protocol
}
)
const (
macSize = 256 / 8
sigSize = 520 / 8
headSize = macSize + sigSize // space of packet frame data
)
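// Wire layout of a v5 packet, as produced by encodePacket below:
//
//	| hash (32 bytes) | signature (65 bytes) | ptype (1 byte) | RLP payload |
//
// The signature covers ptype plus payload; the hash covers everything
// after itself and serves as a handle for replies, not as integrity protection.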
// Neighbors replies are sent across multiple packets to
// stay below the 1280 byte limit. We compute the maximum number
// of entries by stuffing a packet until it grows too large.
var maxNeighbors = func() int {
p := neighbors{Expiration: ^uint64(0)}
maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
for n := 0; ; n++ {
p.Nodes = append(p.Nodes, maxSizeNode)
size, _, err := rlp.EncodeToReader(p)
if err != nil {
// If this ever happens, it will be caught by the unit tests.
panic("cannot encode: " + err.Error())
}
if headSize+size+1 >= 1280 {
return n
}
}
}()
var maxTopicNodes = func() int {
p := topicNodes{}
maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
for n := 0; ; n++ {
p.Nodes = append(p.Nodes, maxSizeNode)
size, _, err := rlp.EncodeToReader(p)
if err != nil {
// If this ever happens, it will be caught by the unit tests.
panic("cannot encode: " + err.Error())
}
if headSize+size+1 >= 1280 {
return n
}
}
}()
func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint {
ip := addr.IP.To4()
if ip == nil {
ip = addr.IP.To16()
}
return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort}
}
func (e1 rpcEndpoint) equal(e2 rpcEndpoint) bool {
return e1.UDP == e2.UDP && e1.TCP == e2.TCP && bytes.Equal(e1.IP, e2.IP)
}
func nodeFromRPC(rn rpcNode) (*Node, error) {
// TODO: don't accept localhost, LAN addresses from internet hosts
n := NewNode(rn.ID, rn.IP, rn.UDP, rn.TCP)
err := n.validateComplete()
return n, err
}
func nodeToRPC(n *Node) rpcNode {
return rpcNode{ID: n.ID, IP: n.IP, UDP: n.UDP, TCP: n.TCP}
}
type ingressPacket struct {
remoteID NodeID
remoteAddr *net.UDPAddr
ev nodeEvent
hash []byte
data interface{} // one of the RPC structs
rawData []byte
}
type conn interface {
ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error)
WriteToUDP(b []byte, addr *net.UDPAddr) (n int, err error)
Close() error
LocalAddr() net.Addr
}
// udp implements the RPC protocol.
type udp struct {
conn conn
priv *ecdsa.PrivateKey
ourEndpoint rpcEndpoint
nat nat.Interface
net *Network
}
// ListenUDP returns a new table that listens for UDP packets on laddr.
func ListenUDP(priv *ecdsa.PrivateKey, laddr string, natm nat.Interface, nodeDBPath string) (*Network, error) {
transport, err := listenUDP(priv, laddr)
if err != nil {
return nil, err
}
net, err := newNetwork(transport, priv.PublicKey, natm, nodeDBPath)
if err != nil {
return nil, err
}
transport.net = net
go transport.readLoop()
return net, nil
}
func listenUDP(priv *ecdsa.PrivateKey, laddr string) (*udp, error) {
addr, err := net.ResolveUDPAddr("udp", laddr)
if err != nil {
return nil, err
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
return nil, err
}
return &udp{conn: conn, priv: priv}, nil
}
func (t *udp) localAddr() *net.UDPAddr {
return t.conn.LocalAddr().(*net.UDPAddr)
}
func (t *udp) Close() {
t.conn.Close()
}
func (t *udp) send(remote *Node, ptype nodeEvent, data interface{}) {
t.sendPacket(remote.ID, remote.addr(), byte(ptype), data)
}
func (t *udp) sendPing(remote *Node, toaddr *net.UDPAddr, topics []Topic) (hash []byte) {
hash, _ = t.sendPacket(remote.ID, toaddr, byte(pingPacket), ping{
Version: Version,
From: t.ourEndpoint,
To: makeEndpoint(toaddr, 0), // TODO: maybe use known TCP port from DB
Expiration: uint64(time.Now().Add(expiration).Unix()),
Topics: topics,
})
return hash
}
func (t *udp) sendFindnode(remote *Node, target NodeID) {
t.sendPacket(remote.ID, remote.addr(), byte(findnodePacket), findnode{
Target: target,
Expiration: uint64(time.Now().Add(expiration).Unix()),
})
}
func (t *udp) sendNeighbours(remote *Node, results []*Node) {
// Send neighbors in chunks with at most maxNeighbors per packet
// to stay below the 1280 byte limit.
p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
for i, result := range results {
p.Nodes = append(p.Nodes, nodeToRPC(result))
if len(p.Nodes) == maxNeighbors || i == len(results)-1 {
t.sendPacket(remote.ID, remote.addr(), byte(neighborsPacket), p)
p.Nodes = p.Nodes[:0]
}
}
}
func (t *udp) sendFindnodeHash(remote *Node, target common.Hash) {
t.sendPacket(remote.ID, remote.addr(), byte(findnodeHashPacket), findnodeHash{
Target: target,
Expiration: uint64(time.Now().Add(expiration).Unix()),
})
}
func (t *udp) sendTopicRegister(remote *Node, topics []Topic, idx int, pong []byte) {
t.sendPacket(remote.ID, remote.addr(), byte(topicRegisterPacket), topicRegister{
Topics: topics,
Idx: idx,
Pong: pong,
})
}
func (t *udp) sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) {
p := topicNodes{Echo: queryHash}
for i, result := range nodes {
p.Nodes = append(p.Nodes, nodeToRPC(result))
if len(p.Nodes) == maxTopicNodes || i == len(nodes)-1 {
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
p.Nodes = p.Nodes[:0]
}
}
}
func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req interface{}) (hash []byte, err error) {
packet, hash, err := encodePacket(t.priv, ptype, req)
if err != nil {
return hash, err
}
glog.V(logger.Detail).Infof(">>> %v to %x@%v\n", nodeEvent(ptype), toid[:8], toaddr)
if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
glog.V(logger.Detail).Infoln("UDP send failed:", err)
}
return hash, err
}
// zeroed padding space for encodePacket.
var headSpace = make([]byte, headSize)
func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (p, hash []byte, err error) {
b := new(bytes.Buffer)
b.Write(headSpace)
b.WriteByte(ptype)
if err := rlp.Encode(b, req); err != nil {
glog.V(logger.Error).Infoln("error encoding packet:", err)
return nil, nil, err
}
packet := b.Bytes()
sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
if err != nil {
glog.V(logger.Error).Infoln("could not sign packet:", err)
return nil, nil, err
}
copy(packet[macSize:], sig)
// add the hash to the front. Note: this doesn't protect the
// packet in any way.
hash = crypto.Keccak256(packet[macSize:])
copy(packet, hash)
return packet, hash, nil
}
// readLoop runs in its own goroutine. it injects ingress UDP packets
// into the network loop.
func (t *udp) readLoop() {
defer t.conn.Close()
// Discovery packets are defined to be no larger than 1280 bytes.
// Packets larger than this size will be cut at the end and treated
// as invalid because their hash won't match.
buf := make([]byte, 1280)
for {
nbytes, from, err := t.conn.ReadFromUDP(buf)
if isTemporaryError(err) {
// Ignore temporary read errors.
glog.V(logger.Debug).Infof("Temporary read error: %v", err)
continue
} else if err != nil {
// Shut down the loop for permanent errors.
glog.V(logger.Debug).Infof("Read error: %v", err)
return
}
t.handlePacket(from, buf[:nbytes])
}
}
func isTemporaryError(err error) bool {
tempErr, ok := err.(interface {
Temporary() bool
})
return ok && tempErr.Temporary() || isPacketTooBig(err)
}
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
pkt := ingressPacket{remoteAddr: from}
if err := decodePacket(buf, &pkt); err != nil {
glog.V(logger.Debug).Infof("Bad packet from %v: %v\n", from, err)
return err
}
t.net.reqReadPacket(pkt)
return nil
}
func decodePacket(buf []byte, pkt *ingressPacket) error {
if len(buf) < headSize+1 {
return errPacketTooSmall
}
hash, sig, sigdata := buf[:macSize], buf[macSize:headSize], buf[headSize:]
shouldhash := crypto.Keccak256(buf[macSize:])
if !bytes.Equal(hash, shouldhash) {
return errBadHash
}
fromID, err := recoverNodeID(crypto.Keccak256(buf[headSize:]), sig)
if err != nil {
return err
}
pkt.rawData = buf
pkt.hash = hash
pkt.remoteID = fromID
switch pkt.ev = nodeEvent(sigdata[0]); pkt.ev {
case pingPacket:
pkt.data = new(ping)
case pongPacket:
pkt.data = new(pong)
case findnodePacket:
pkt.data = new(findnode)
case neighborsPacket:
pkt.data = new(neighbors)
case findnodeHashPacket:
pkt.data = new(findnodeHash)
case topicRegisterPacket:
pkt.data = new(topicRegister)
case topicQueryPacket:
pkt.data = new(topicQuery)
case topicNodesPacket:
pkt.data = new(topicNodes)
default:
return fmt.Errorf("unknown packet type: %d", sigdata[0])
}
s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0)
err = s.Decode(pkt.data)
return err
}

View File

@ -0,0 +1,26 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//+build !windows
package discv5
// isPacketTooBig reports whether err indicates that a UDP packet didn't
// fit the receive buffer. There is no such error on
// non-Windows platforms.
func isPacketTooBig(err error) bool {
return false
}

View File

@ -0,0 +1,40 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//+build windows
package discv5
import (
"net"
"os"
"syscall"
)
const _WSAEMSGSIZE = syscall.Errno(10040)
// isPacketTooBig reports whether err indicates that a UDP packet didn't
// fit the receive buffer. On Windows, WSARecvFrom returns
// code WSAEMSGSIZE and no data if this happens.
func isPacketTooBig(err error) bool {
if opErr, ok := err.(*net.OpError); ok {
if scErr, ok := opErr.Err.(*os.SyscallError); ok {
return scErr.Err == _WSAEMSGSIZE
}
return opErr.Err == _WSAEMSGSIZE
}
return false
}

View File

@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/nat"
)
@ -72,6 +73,8 @@ type Config struct {
// or not. Disabling is usually useful for protocol debugging (manual topology).
Discovery bool
DiscoveryV5 bool
// Name sets the node name of this server.
// Use common.MakeName to create a name that follows existing conventions.
Name string
@ -105,6 +108,8 @@ type Config struct {
// the server is started.
ListenAddr string
ListenAddrV5 string
// If set to a non-nil value, the given NAT port mapper
// is used to make the listening port available to the
// Internet.
@ -352,6 +357,17 @@ func (srv *Server) Start() (err error) {
srv.ntab = ntab
}
if srv.DiscoveryV5 {
ntab, err := discv5.ListenUDP(srv.PrivateKey, srv.ListenAddrV5, srv.NAT, "") //srv.NodeDatabase)
if err != nil {
return err
}
if err := ntab.SetFallbackNodes(discv5.BootNodes); err != nil {
return err
}
//srv.ntab = ntab
}
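// Editor's sketch (not part of this change) of enabling v5 discovery
// through the new Config fields above:
//
//	cfg := p2p.Config{
//		PrivateKey:   key,
//		MaxPeers:     25,
//		Discovery:    true,
//		DiscoveryV5:  true,
//		ListenAddr:   ":30303",
//		ListenAddrV5: ":30304",
//	}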
dynPeers := (srv.MaxPeers + 1) / 2
if !srv.Discovery {
dynPeers = 0

View File

@ -0,0 +1,65 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package params
import "math/big"
type GasTable struct {
ExtcodeSize *big.Int
ExtcodeCopy *big.Int
Balance *big.Int
SLoad *big.Int
Calls *big.Int
Suicide *big.Int
// CreateBySuicide is charged when the
// suicide refund recipient does not
// yet exist; the logic is similar to
// CALL's account creation charge.
// May be left nil; nil means not charged.
CreateBySuicide *big.Int
}
var (
// GasTableHomestead contains the gas prices for
// the homestead phase.
GasTableHomestead = GasTable{
ExtcodeSize: big.NewInt(20),
ExtcodeCopy: big.NewInt(20),
Balance: big.NewInt(20),
SLoad: big.NewInt(50),
Calls: big.NewInt(40),
Suicide: big.NewInt(0),
// explicitly set to nil to indicate
// this rule does not apply to homestead.
CreateBySuicide: nil,
}
// GasTableHomesteadGasRepriceFork contains the gas re-prices
// for the homestead gas-reprice fork (EIP 150).
GasTableHomesteadGasRepriceFork = GasTable{
ExtcodeSize: big.NewInt(700),
ExtcodeCopy: big.NewInt(700),
Balance: big.NewInt(400),
SLoad: big.NewInt(200),
Calls: big.NewInt(700),
Suicide: big.NewInt(5000),
CreateBySuicide: big.NewInt(25000),
}
)
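// Illustration (not part of this file): a chain configuration would select
// the active table by block number, e.g.
//
//	func gasTable(num *big.Int) GasTable {
//		if num.Cmp(MainNetHomesteadGasRepriceBlock) >= 0 {
//			return GasTableHomesteadGasRepriceFork
//		}
//		return GasTableHomestead
//	}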

View File

@ -71,4 +71,5 @@ var (
SuicideRefundGas = big.NewInt(24000) // Refunded following a suicide operation.
MemoryGas = big.NewInt(3) // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL.
TxDataNonZeroGas = big.NewInt(68) // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions.
)

View File

@ -19,6 +19,8 @@ package params
import "math/big"
var (
TestNetHomesteadBlock = big.NewInt(494000) // Testnet homestead block
MainNetHomesteadBlock = big.NewInt(1150000) // Mainnet homestead block
TestNetHomesteadGasRepriceBlock = big.NewInt(1783000) // Test net gas reprice block
MainNetHomesteadGasRepriceBlock = big.NewInt(2463000) // Main net gas reprice block
)

View File

@ -337,13 +337,7 @@ func (s *Server) exec(ctx context.Context, codec ServerCodec, req *serverRequest
if req.err != nil {
response = codec.CreateErrorResponse(&req.id, req.err)
} else {
/*fmt.Println()
fmt.Println("SREQ")
fmt.Println(*req)*/
response, callback = s.handle(ctx, codec, req)
/*fmt.Println("RESP")
fmt.Println(response)
fmt.Println()*/
}
if err := codec.Write(response); err != nil {
@ -367,15 +361,9 @@ func (s *Server) execBatch(ctx context.Context, codec ServerCodec, requests []*s
responses[i] = codec.CreateErrorResponse(&req.id, req.err)
} else {
var callback func()
/*fmt.Println()
fmt.Println("SREQ batch")
fmt.Println(*req)*/
if responses[i], callback = s.handle(ctx, codec, req); callback != nil {
callbacks = append(callbacks, callback)
}
/*fmt.Println("RESP")
fmt.Println(responses[i])
fmt.Println()*/
}
}
@ -403,10 +391,6 @@ func (s *Server) readRequest(codec ServerCodec) ([]*serverRequest, bool, Error)
// verify requests
for i, r := range reqs {
/*fmt.Println()
fmt.Println(time.Now())
fmt.Println("REQ")
fmt.Println(r)*/
var ok bool
var svc *service

View File

@ -302,32 +302,3 @@ func (b *HexBytes) UnmarshalJSON(input []byte) error {
_, err := hex.Decode(*b, input)
return err
}
type ClientRestartWrapper struct {
client *Client
newClientFn func() *Client
mu sync.RWMutex
}
func NewClientRestartWrapper(newClientFn func() *Client) *ClientRestartWrapper {
return &ClientRestartWrapper{
client: newClientFn(),
newClientFn: newClientFn,
}
}
func (rw *ClientRestartWrapper) Client() *Client {
rw.mu.RLock()
defer rw.mu.RUnlock()
return rw.client
}
func (rw *ClientRestartWrapper) Restart() {
rw.mu.Lock()
defer rw.mu.Unlock()
rw.client.Close()
rw.client = rw.newClientFn()
}

View File

@ -23,8 +23,6 @@ import (
"hash"
"io"
"sync"
// "github.com/ethereum/go-ethereum/logger"
// "github.com/ethereum/go-ethereum/logger/glog"
)
/*
@ -124,12 +122,13 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
jobC := make(chan *hashJob, 2*processors)
wg := &sync.WaitGroup{}
errC := make(chan error)
quitC := make(chan bool)
// wwg = workers waitgroup keeps track of hashworkers spawned by this split call
if wwg != nil {
wwg.Add(1)
}
go self.hashWorker(jobC, chunkC, errC, swg, wwg)
go self.hashWorker(jobC, chunkC, errC, quitC, swg, wwg)
depth := 0
treeSize := self.chunkSize
@ -141,11 +140,10 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
}
key := make([]byte, self.hashFunc().Size())
// glog.V(logger.Detail).Infof("split request received for data (%v bytes, depth: %v)", size, depth)
// this waitgroup member is released after the root hash is calculated
wg.Add(1)
// launch the actual recursive function, passing the waitgroups
go self.split(depth, treeSize/self.branches, key, data, size, jobC, chunkC, errC, wg, swg, wwg)
go self.split(depth, treeSize/self.branches, key, data, size, jobC, chunkC, errC, quitC, wg, swg, wwg)
// closes internal error channel if all subprocesses in the workgroup finished
go func() {
@ -153,7 +151,6 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
wg.Wait()
// if storage waitgroup is non-nil, we wait for storage to finish too
if swg != nil {
// glog.V(logger.Detail).Infof("Waiting for storage to finish")
swg.Wait()
}
close(errC)
@ -162,14 +159,15 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
select {
case err := <-errC:
if err != nil {
close(quitC)
return nil, err
}
// TODO: add a timeout
}
return key, nil
}
func (self *TreeChunker) split(depth int, treeSize int64, key Key, data io.Reader, size int64, jobC chan *hashJob, chunkC chan *Chunk, errC chan error, parentWg, swg, wwg *sync.WaitGroup) {
func (self *TreeChunker) split(depth int, treeSize int64, key Key, data io.Reader, size int64, jobC chan *hashJob, chunkC chan *Chunk, errC chan error, quitC chan bool, parentWg, swg, wwg *sync.WaitGroup) {
for depth > 0 && size < treeSize {
treeSize /= self.branches
@ -180,17 +178,24 @@ func (self *TreeChunker) split(depth int, treeSize int64, key Key, data io.Reade
// leaf nodes -> content chunks
chunkData := make([]byte, size+8)
binary.LittleEndian.PutUint64(chunkData[0:8], uint64(size))
data.Read(chunkData[8:])
var readBytes int64
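// read until the chunk buffer is full: Read may return short counts,
// and io.EOF is only acceptable once exactly size bytes have arrived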
for readBytes < size {
n, err := data.Read(chunkData[8+readBytes:])
readBytes += int64(n)
if err != nil && !(err == io.EOF && readBytes == size) {
errC <- err
return
}
}
select {
case jobC <- &hashJob{key, chunkData, size, parentWg}:
case <-errC:
case <-quitC:
}
// glog.V(logger.Detail).Infof("read %v", size)
return
}
// depth > 0
// intermediate chunk containing child nodes hashes
branchCnt := int64((size + treeSize - 1) / treeSize)
// glog.V(logger.Detail).Infof("intermediate node: setting branches: %v, depth: %v, max subtree size: %v, data size: %v", branches, depth, treeSize, size)
var chunk []byte = make([]byte, branchCnt*self.hashSize+8)
var pos, i int64
@ -210,7 +215,7 @@ func (self *TreeChunker) split(depth int, treeSize int64, key Key, data io.Reade
subTreeKey := chunk[8+i*self.hashSize : 8+(i+1)*self.hashSize]
childrenWg.Add(1)
self.split(depth-1, treeSize/self.branches, subTreeKey, data, secSize, jobC, chunkC, errC, childrenWg, swg, wwg)
self.split(depth-1, treeSize/self.branches, subTreeKey, data, secSize, jobC, chunkC, errC, quitC, childrenWg, swg, wwg)
i++
pos += treeSize
@ -224,15 +229,15 @@ func (self *TreeChunker) split(depth int, treeSize int64, key Key, data io.Reade
wwg.Add(1)
}
self.workerCount++
go self.hashWorker(jobC, chunkC, errC, swg, wwg)
go self.hashWorker(jobC, chunkC, errC, quitC, swg, wwg)
}
select {
case jobC <- &hashJob{key, chunk, size, parentWg}:
case <-errC:
case <-quitC:
}
}
func (self *TreeChunker) hashWorker(jobC chan *hashJob, chunkC chan *Chunk, errC chan error, swg, wwg *sync.WaitGroup) {
func (self *TreeChunker) hashWorker(jobC chan *hashJob, chunkC chan *Chunk, errC chan error, quitC chan bool, swg, wwg *sync.WaitGroup) {
hasher := self.hashFunc()
if wwg != nil {
defer wwg.Done()
@ -247,8 +252,7 @@ func (self *TreeChunker) hashWorker(jobC chan *hashJob, chunkC chan *Chunk, errC
// now we got the hashes in the chunk, then hash the chunks
hasher.Reset()
self.hashChunk(hasher, job, chunkC, swg)
// glog.V(logger.Detail).Infof("hash chunk (%v)", job.size)
case <-errC:
case <-quitC:
return
}
}
@ -276,6 +280,7 @@ func (self *TreeChunker) hashChunk(hasher hash.Hash, job *hashJob, chunkC chan *
}
}
job.parentWg.Done()
if chunkC != nil {
chunkC <- newChunk
}
@ -328,7 +333,6 @@ func (self *LazyChunkReader) Size(quitC chan bool) (n int64, err error) {
func (self *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) {
// this is correct, a swarm doc cannot be zero length, so no EOF is expected
if len(b) == 0 {
// glog.V(logger.Detail).Infof("Size query for %v", chunk.Key.Log())
return 0, nil
}
quitC := make(chan bool)
@ -336,13 +340,10 @@ func (self *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) {
if err != nil {
return 0, err
}
// glog.V(logger.Detail).Infof("readAt: len(b): %v, off: %v, size: %v ", len(b), off, size)
errC := make(chan error)
// glog.V(logger.Detail).Infof("readAt: reading %v into %d bytes at offset %d.", self.chunk.Key.Log(), len(b), off)
// }
// glog.V(logger.Detail).Infof("-> want: %v, off: %v size: %v ", want, off, self.size)
var treeSize int64
var depth int
// calculate depth and max treeSize
@ -364,22 +365,15 @@ func (self *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) {
return 0, err
}
// glog.V(logger.Detail).Infof("ReadAt received %v", err)
// glog.V(logger.Detail).Infof("end: len(b): %v, off: %v, size: %v ", len(b), off, size)
if off+int64(len(b)) >= size {
// glog.V(logger.Detail).Infof(" len(b): %v EOF", len(b))
return len(b), io.EOF
}
// glog.V(logger.Detail).Infof("ReadAt returning at %d: %v", read, err)
return len(b), nil
}
func (self *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeSize int64, chunk *Chunk, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) {
defer parentWg.Done()
// return NewDPA(&LocalStore{})
// glog.V(logger.Detail).Infof("inh len(b): %v, off: %v eoff: %v ", len(b), off, eoff)
// glog.V(logger.Detail).Infof("depth: %v, loff: %v, eoff: %v, chunk.Size: %v, treeSize: %v", depth, off, eoff, chunk.Size, treeSize)
// chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
@ -391,7 +385,6 @@ func (self *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, tr
// leaf chunk found
if depth == 0 {
// glog.V(logger.Detail).Infof("depth: %v, len(b): %v, off: %v, eoff: %v, chunk.Size: %v %v, treeSize: %v", depth, len(b), off, eoff, chunk.Size, len(chunk.SData), treeSize)
extra := 8 + eoff - int64(len(chunk.SData))
if extra > 0 {
eoff -= extra
@ -406,7 +399,6 @@ func (self *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, tr
wg := &sync.WaitGroup{}
defer wg.Wait()
// glog.V(logger.Detail).Infof("start %v,end %v", start, end)
for i := start; i < end; i++ {
soff := i * treeSize
@ -425,7 +417,6 @@ func (self *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, tr
wg.Add(1)
go func(j int64) {
childKey := chunk.SData[8+j*self.hashSize : 8+(j+1)*self.hashSize]
// glog.V(logger.Detail).Infof("subtree ind.ex: %v -> %v", j, childKey.Log())
chunk := retrieve(childKey, self.chunkC, quitC)
if chunk == nil {
select {
@ -450,7 +441,6 @@ func retrieve(key Key, chunkC chan *Chunk, quitC chan bool) *Chunk {
Key: key,
C: make(chan bool), // close channel to signal data delivery
}
// glog.V(logger.Detail).Infof("chunk data sent for %v (key interval in chunk %v-%v)", ch.Key.Log(), j*self.chunker.hashSize, (j+1)*self.chunker.hashSize)
// submit chunk for retrieval
select {
case chunkC <- chunk: // submit retrieval request, someone should be listening on the other side (or we will time out globally)
@ -464,7 +454,6 @@ func retrieve(key Key, chunkC chan *Chunk, quitC chan bool) *Chunk {
// this is how we control process leakage (quitC is closed once join is finished (after timeout))
return nil
case <-chunk.C: // bells are ringing, data have been delivered
// glog.V(logger.Detail).Infof("chunk data received")
}
if len(chunk.SData) == 0 {
return nil // chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
@ -476,7 +465,6 @@ func retrieve(key Key, chunkC chan *Chunk, quitC chan bool) *Chunk {
// Read keeps a cursor so it cannot be called simultaneously; see ReadAt
func (self *LazyChunkReader) Read(b []byte) (read int, err error) {
read, err = self.ReadAt(b, self.off)
// glog.V(logger.Detail).Infof("read: %v, off: %v, error: %v", read, self.off, err)
self.off += int64(read)
return
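The recurring change in this chunker file is the new quitC channel: workers used to select on errC, where a single receive consumes the signal, whereas close(quitC) is observed by every blocked goroutine at once. A minimal, self-contained sketch of the pattern (the worker loop and job channel are illustrative, not chunker code):

```go
package main

import "fmt"

func main() {
	jobC := make(chan int)
	quitC := make(chan bool)
	done := make(chan struct{})

	// every worker selects on quitC; close(quitC) releases all of
	// them at once, which a single send on errC cannot do.
	for i := 0; i < 4; i++ {
		go func() {
			defer func() { done <- struct{}{} }()
			for {
				select {
				case j := <-jobC:
					fmt.Println("job", j)
				case <-quitC:
					return
				}
			}
		}()
	}

	close(quitC) // broadcast shutdown, e.g. after the first error
	for i := 0; i < 4; i++ {
		<-done
	}
}
```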

View File

@ -81,7 +81,6 @@ func (self *PyramidChunker) Split(data io.Reader, size int64, chunkC chan *Chunk
chunks := (size + self.chunkSize - 1) / self.chunkSize
depth := int(math.Ceil(math.Log(float64(chunks))/math.Log(float64(self.branches)))) + 1
// glog.V(logger.Detail).Infof("chunks: %v, depth: %v", chunks, depth)
results := Tree{
Chunks: chunks,
@ -99,26 +98,24 @@ func (self *PyramidChunker) Split(data io.Reader, size int64, chunkC chan *Chunk
go self.processor(pend, swg, tasks, chunkC, &results)
}
// Feed the chunks into the task pool
read := 0
for index := 0; ; index++ {
buffer := make([]byte, self.chunkSize+8)
n, err := data.Read(buffer[8:])
last := err == io.ErrUnexpectedEOF || err == io.EOF
// glog.V(logger.Detail).Infof("n: %v, index: %v, depth: %v", n, index, depth)
read += n
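// a chunk is last once the declared size has been read in full,
// even if the reader has not yet returned io.EOF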
last := int64(read) == size || err == io.ErrUnexpectedEOF || err == io.EOF
if err != nil && !last {
// glog.V(logger.Info).Infof("error: %v", err)
close(abortC)
break
}
binary.LittleEndian.PutUint64(buffer[:8], uint64(n))
pend.Add(1)
// glog.V(logger.Info).Infof("-> task %v (%v)", index, n)
select {
case tasks <- &Task{Index: int64(index), Size: uint64(n), Data: buffer[:n+8], Last: last}:
case <-abortC:
return nil, err
}
if last {
// glog.V(logger.Info).Infof("last task %v (%v)", index, n)
break
}
}
@ -126,7 +123,6 @@ func (self *PyramidChunker) Split(data io.Reader, size int64, chunkC chan *Chunk
close(tasks)
pend.Wait()
// glog.V(logger.Info).Infof("len: %v", results.Levels[0][0])
key := results.Levels[0][0].Children[0][:]
return key, nil
}
@ -134,12 +130,10 @@ func (self *PyramidChunker) Split(data io.Reader, size int64, chunkC chan *Chunk
func (self *PyramidChunker) processor(pend, swg *sync.WaitGroup, tasks chan *Task, chunkC chan *Chunk, results *Tree) {
defer pend.Done()
// glog.V(logger.Info).Infof("processor started")
// Start processing leaf chunks ad infinitum
hasher := self.hashFunc()
for task := range tasks {
depth, pow := len(results.Levels)-1, self.branches
// glog.V(logger.Info).Infof("task: %v, last: %v", task.Index, task.Last)
size := task.Size
data := task.Data
var node *Node
@ -171,10 +165,8 @@ func (self *PyramidChunker) processor(pend, swg *sync.WaitGroup, tasks chan *Tas
}
node = &Node{pending, 0, make([]common.Hash, pending), last}
results.Levels[depth][task.Index/pow] = node
// glog.V(logger.Info).Infof("create node %v, %v (%v children, all pending)", depth, task.Index/pow, pending)
}
node.Pending--
// glog.V(logger.Info).Infof("pending now: %v", node.Pending)
i := task.Index / (pow / self.branches) % self.branches
if last {
node.Last = true
@ -182,7 +174,6 @@ func (self *PyramidChunker) processor(pend, swg *sync.WaitGroup, tasks chan *Tas
copy(node.Children[i][:], hash)
node.Size += size
left := node.Pending
// glog.V(logger.Info).Infof("left pending now: %v, node size: %v", left, node.Size)
if chunkC != nil {
if swg != nil {
swg.Add(1)
@ -198,7 +189,6 @@ func (self *PyramidChunker) processor(pend, swg *sync.WaitGroup, tasks chan *Tas
results.Lock.Unlock()
// If there's more work to be done, leave for others
// glog.V(logger.Info).Infof("left %v", left)
if left > 0 {
break
}

View File

@ -27,8 +27,9 @@ import (
)
type hasher struct {
tmp *bytes.Buffer
sha hash.Hash
tmp *bytes.Buffer
sha hash.Hash
cachegen, cachelimit uint16
}
// hashers live in a global pool.
@ -38,8 +39,10 @@ var hasherPool = sync.Pool{
},
}
func newHasher() *hasher {
return hasherPool.Get().(*hasher)
func newHasher(cachegen, cachelimit uint16) *hasher {
h := hasherPool.Get().(*hasher)
h.cachegen, h.cachelimit = cachegen, cachelimit
return h
}
func returnHasherToPool(h *hasher) {
@ -50,8 +53,18 @@ func returnHasherToPool(h *hasher) {
// original node initialized with the computed hash to replace the original one.
func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error) {
// If we're not storing the node, just hashing, use available cached data
if hash, dirty := n.cache(); hash != nil && (db == nil || !dirty) {
return hash, n, nil
if hash, dirty := n.cache(); hash != nil {
if db == nil {
return hash, n, nil
}
if n.canUnload(h.cachegen, h.cachelimit) {
// Evict the node from cache. All of its subnodes will have a lower or equal
// cache generation number.
return hash, hash, nil
}
if !dirty {
return hash, n, nil
}
}
// Trie not processed yet or needs storage, walk the children
collapsed, cached, err := h.hashChildren(n, db)
@ -62,19 +75,21 @@ func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error)
if err != nil {
return hashNode{}, n, err
}
// Cache the hash and RLP blob of the node for later reuse
// Cache the hash of the node for later reuse.
if hash, ok := hashed.(hashNode); ok && !force {
switch cached := cached.(type) {
case shortNode:
cached.hash = hash
case *shortNode:
cached = cached.copy()
cached.flags.hash = hash
if db != nil {
cached.dirty = false
cached.flags.dirty = false
}
return hashed, cached, nil
case fullNode:
cached.hash = hash
case *fullNode:
cached = cached.copy()
cached.flags.hash = hash
if db != nil {
cached.dirty = false
cached.flags.dirty = false
}
return hashed, cached, nil
}
@ -89,40 +104,42 @@ func (h *hasher) hashChildren(original node, db DatabaseWriter) (node, node, err
var err error
switch n := original.(type) {
case shortNode:
case *shortNode:
// Hash the short node's child, caching the newly hashed subtree
cached := n
cached.Key = common.CopyBytes(cached.Key)
collapsed, cached := n.copy(), n.copy()
collapsed.Key = compactEncode(n.Key)
cached.Key = common.CopyBytes(n.Key)
n.Key = compactEncode(n.Key)
if _, ok := n.Val.(valueNode); !ok {
if n.Val, cached.Val, err = h.hash(n.Val, db, false); err != nil {
return n, original, err
collapsed.Val, cached.Val, err = h.hash(n.Val, db, false)
if err != nil {
return original, original, err
}
}
if n.Val == nil {
n.Val = valueNode(nil) // Ensure that nil children are encoded as empty strings.
if collapsed.Val == nil {
collapsed.Val = valueNode(nil) // Ensure that nil children are encoded as empty strings.
}
return n, cached, nil
return collapsed, cached, nil
case fullNode:
case *fullNode:
// Hash the full node's children, caching the newly hashed subtrees
cached := fullNode{dirty: n.dirty}
collapsed, cached := n.copy(), n.copy()
for i := 0; i < 16; i++ {
if n.Children[i] != nil {
if n.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, false); err != nil {
return n, original, err
collapsed.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, false)
if err != nil {
return original, original, err
}
} else {
n.Children[i] = valueNode(nil) // Ensure that nil children are encoded as empty strings.
collapsed.Children[i] = valueNode(nil) // Ensure that nil children are encoded as empty strings.
}
}
cached.Children[16] = n.Children[16]
if n.Children[16] == nil {
n.Children[16] = valueNode(nil)
if collapsed.Children[16] == nil {
collapsed.Children[16] = valueNode(nil)
}
return n, cached, nil
return collapsed, cached, nil
default:
// Value and hash nodes don't have children so they're left as were
@ -140,6 +157,7 @@ func (h *hasher) store(n node, db DatabaseWriter, force bool) (node, error) {
if err := rlp.Encode(h.tmp, n); err != nil {
panic("encode error: " + err.Error())
}
if h.tmp.Len() < 32 && !force {
return n, nil // Nodes smaller than 32 bytes are stored inside their parent
}

View File

@ -56,11 +56,11 @@ func (it *Iterator) makeKey() []byte {
key := it.keyBuf[:0]
for _, se := range it.nodeIt.stack {
switch node := se.node.(type) {
case fullNode:
case *fullNode:
if se.child <= 16 {
key = append(key, byte(se.child))
}
case shortNode:
case *shortNode:
if hasTerm(node.Key) {
key = append(key, node.Key[:len(node.Key)-1]...)
} else {
@ -148,7 +148,7 @@ func (it *NodeIterator) step() error {
if (ancestor == common.Hash{}) {
ancestor = parent.parent
}
if node, ok := parent.node.(fullNode); ok {
if node, ok := parent.node.(*fullNode); ok {
// Full node, traverse all children, then the node itself
if parent.child >= len(node.Children) {
break
@ -156,7 +156,7 @@ func (it *NodeIterator) step() error {
for parent.child++; parent.child < len(node.Children); parent.child++ {
if current := node.Children[parent.child]; current != nil {
it.stack = append(it.stack, &nodeIteratorState{
hash: common.BytesToHash(node.hash),
hash: common.BytesToHash(node.flags.hash),
node: current,
parent: ancestor,
child: -1,
@ -164,14 +164,14 @@ func (it *NodeIterator) step() error {
break
}
}
} else if node, ok := parent.node.(shortNode); ok {
} else if node, ok := parent.node.(*shortNode); ok {
// Short node, traverse the pointer singleton child, then the node itself
if parent.child >= 0 {
break
}
parent.child++
it.stack = append(it.stack, &nodeIteratorState{
hash: common.BytesToHash(node.hash),
hash: common.BytesToHash(node.flags.hash),
node: node.Val,
parent: ancestor,
child: -1,

View File

@ -30,42 +30,60 @@ var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b
type node interface {
fstring(string) string
cache() (hashNode, bool)
canUnload(cachegen, cachelimit uint16) bool
}
type (
fullNode struct {
Children [17]node // Actual trie node data to encode/decode (needs custom encoder)
hash hashNode // Cached hash of the node to prevent rehashing (may be nil)
dirty bool // Cached flag whether the node's new or already stored
flags nodeFlag
}
shortNode struct {
Key []byte
Val node
hash hashNode // Cached hash of the node to prevent rehashing (may be nil)
dirty bool // Cached flag whether the node's new or already stored
flags nodeFlag
}
hashNode []byte
valueNode []byte
)
// EncodeRLP encodes a full node into the consensus RLP format.
func (n fullNode) EncodeRLP(w io.Writer) error {
func (n *fullNode) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, n.Children)
}
// Cache accessors to retrieve precalculated values (avoid lengthy type switches).
func (n fullNode) cache() (hashNode, bool) { return n.hash, n.dirty }
func (n shortNode) cache() (hashNode, bool) { return n.hash, n.dirty }
func (n hashNode) cache() (hashNode, bool) { return nil, true }
func (n valueNode) cache() (hashNode, bool) { return nil, true }
func (n *fullNode) copy() *fullNode { copy := *n; return &copy }
func (n *shortNode) copy() *shortNode { copy := *n; return &copy }
// nodeFlag contains caching-related metadata about a node.
type nodeFlag struct {
hash hashNode // cached hash of the node (may be nil)
gen uint16 // cache generation counter
dirty bool // whether the node has changes that must be written to the database
}
// canUnload tells whether a node can be unloaded.
func (n *nodeFlag) canUnload(cachegen, cachelimit uint16) bool {
return !n.dirty && cachegen-n.gen >= cachelimit
}
func (n *fullNode) canUnload(gen, limit uint16) bool { return n.flags.canUnload(gen, limit) }
func (n *shortNode) canUnload(gen, limit uint16) bool { return n.flags.canUnload(gen, limit) }
func (n hashNode) canUnload(uint16, uint16) bool { return false }
func (n valueNode) canUnload(uint16, uint16) bool { return false }
func (n *fullNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
func (n *shortNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
func (n hashNode) cache() (hashNode, bool) { return nil, true }
func (n valueNode) cache() (hashNode, bool) { return nil, true }
// Pretty printing.
func (n fullNode) String() string { return n.fstring("") }
func (n shortNode) String() string { return n.fstring("") }
func (n hashNode) String() string { return n.fstring("") }
func (n valueNode) String() string { return n.fstring("") }
func (n *fullNode) String() string { return n.fstring("") }
func (n *shortNode) String() string { return n.fstring("") }
func (n hashNode) String() string { return n.fstring("") }
func (n valueNode) String() string { return n.fstring("") }
func (n fullNode) fstring(ind string) string {
func (n *fullNode) fstring(ind string) string {
resp := fmt.Sprintf("[\n%s ", ind)
for i, node := range n.Children {
if node == nil {
@ -76,7 +94,7 @@ func (n fullNode) fstring(ind string) string {
}
return resp + fmt.Sprintf("\n%s] ", ind)
}
func (n shortNode) fstring(ind string) string {
func (n *shortNode) fstring(ind string) string {
return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+" "))
}
func (n hashNode) fstring(ind string) string {
@ -120,6 +138,7 @@ func decodeShort(hash, buf, elems []byte) (node, error) {
if err != nil {
return nil, err
}
flag := nodeFlag{hash: hash}
key := compactDecode(kbuf)
if key[len(key)-1] == 16 {
// value node
@ -127,17 +146,17 @@ func decodeShort(hash, buf, elems []byte) (node, error) {
if err != nil {
return nil, fmt.Errorf("invalid value node: %v", err)
}
return shortNode{key, valueNode(val), hash, false}, nil
return &shortNode{key, append(valueNode{}, val...), flag}, nil
}
r, _, err := decodeRef(rest)
if err != nil {
return nil, wrapError(err, "val")
}
return shortNode{key, r, hash, false}, nil
return &shortNode{key, r, flag}, nil
}
func decodeFull(hash, buf, elems []byte) (fullNode, error) {
n := fullNode{hash: hash}
func decodeFull(hash, buf, elems []byte) (*fullNode, error) {
n := &fullNode{flags: nodeFlag{hash: hash}}
for i := 0; i < 16; i++ {
cld, rest, err := decodeRef(elems)
if err != nil {
@ -150,7 +169,7 @@ func decodeFull(hash, buf, elems []byte) (fullNode, error) {
return n, err
}
if len(val) > 0 {
n.Children[16] = valueNode(val)
n.Children[16] = append(valueNode{}, val...)
}
return n, nil
}
@ -176,7 +195,7 @@ func decodeRef(buf []byte) (node, []byte, error) {
// empty node
return nil, rest, nil
case kind == rlp.String && len(val) == 32:
return hashNode(val), rest, nil
return append(hashNode{}, val...), rest, nil
default:
return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0 or 32)", len(val))
}

View File

@ -44,7 +44,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue {
tn := t.root
for len(key) > 0 && tn != nil {
switch n := tn.(type) {
case shortNode:
case *shortNode:
if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
// The trie doesn't contain the key.
tn = nil
@ -53,7 +53,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue {
key = key[len(n.Key):]
}
nodes = append(nodes, n)
case fullNode:
case *fullNode:
tn = n.Children[key[0]]
key = key[1:]
nodes = append(nodes, n)
@ -70,7 +70,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue {
panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
}
}
hasher := newHasher()
hasher := newHasher(0, 0)
proof := make([]rlp.RawValue, 0, len(nodes))
for i, n := range nodes {
// Don't bother checking for errors here since hasher panics
@ -130,13 +130,13 @@ func VerifyProof(rootHash common.Hash, key []byte, proof []rlp.RawValue) (value
func get(tn node, key []byte) ([]byte, node) {
for len(key) > 0 {
switch n := tn.(type) {
case shortNode:
case *shortNode:
if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
return nil, nil
}
tn = n.Val
key = key[len(n.Key):]
case fullNode:
case *fullNode:
tn = n.Children[key[0]]
key = key[1:]
case hashNode:

View File

@ -49,8 +49,12 @@ type SecureTrie struct {
// If root is the zero hash or the sha3 hash of an empty string, the
// trie is initially empty. Otherwise, New will panic if db is nil
// and returns MissingNodeError if the root node cannot be found.
//
// Accessing the trie loads nodes from db on demand.
func NewSecure(root common.Hash, db Database) (*SecureTrie, error) {
// Loaded nodes are kept around until their 'cache generation' expires.
// A new cache generation is created by each call to Commit.
// cachelimit sets the number of past cache generations to keep.
func NewSecure(root common.Hash, db Database, cachelimit uint16) (*SecureTrie, error) {
if db == nil {
panic("NewSecure called with nil database")
}
@ -58,9 +62,8 @@ func NewSecure(root common.Hash, db Database) (*SecureTrie, error) {
if err != nil {
return nil, err
}
return &SecureTrie{
trie: *trie,
}, nil
trie.SetCacheLimit(cachelimit)
return &SecureTrie{trie: *trie}, nil
}
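A hedged usage sketch of the new signature (the generation limit of 16 and the surrounding helper are illustrative, not part of this diff):

```go
// updateState is a hypothetical caller of NewSecure showing the
// added cachelimit parameter.
func updateState(root common.Hash, db trie.Database, key, value []byte) (common.Hash, error) {
	// keep clean nodes cached for the last 16 commits before unloading
	tr, err := trie.NewSecure(root, db, 16)
	if err != nil {
		return common.Hash{}, err
	}
	tr.Update(key, value)
	return tr.Commit() // each Commit starts a new cache generation
}
```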
// Get returns the value for key stored in the trie.
@ -191,7 +194,7 @@ func (t *SecureTrie) secKey(key []byte) []byte {
// The caller must not hold onto the return value because it will become
// invalid on the next call to hashKey or secKey.
func (t *SecureTrie) hashKey(key []byte) []byte {
h := newHasher()
h := newHasher(0, 0)
h.sha.Reset()
h.sha.Write(key)
buf := h.sha.Sum(t.hashKeyBuf[:0])

View File

@ -212,12 +212,14 @@ func (s *TrieSync) children(req *request) ([]*request, error) {
children := []child{}
switch node := (*req.object).(type) {
case shortNode:
case *shortNode:
node = node.copy() // Prevents linking all downloaded nodes together.
children = []child{{
node: &node.Val,
depth: req.depth + len(node.Key),
}}
case fullNode:
case *fullNode:
node = node.copy()
for i := 0; i < 17; i++ {
if node.Children[i] != nil {
children = append(children, child{

View File

@ -62,6 +62,23 @@ type Trie struct {
root node
db Database
originalRoot common.Hash
// Cache generation values.
// cachegen increases by one with each commit operation.
// new nodes are tagged with the current generation and unloaded
// when their generation is older than cachegen-cachelimit.
cachegen, cachelimit uint16
}
// SetCacheLimit sets the number of 'cache generations' to keep.
// A cache generation is created by a call to Commit.
func (t *Trie) SetCacheLimit(l uint16) {
t.cachelimit = l
}
// newFlag returns the cache flag value for a newly created node.
func (t *Trie) newFlag() nodeFlag {
return nodeFlag{dirty: true, gen: t.cachegen}
}
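The canUnload check in node.go (`cachegen-n.gen >= cachelimit`) works with this counter even when the uint16 generation counter overflows, because the subtraction wraps modulo 2^16. A small illustration:

```go
package main

import "fmt"

func main() {
	const cachelimit uint16 = 16

	var gen uint16 = 65530 // generation at which a node was loaded
	cachegen := gen

	cachegen += 9 // nine commits later the counter has wrapped to 3
	fmt.Println(cachegen-gen >= cachelimit) // false: 9 generations old, keep

	cachegen += 11 // twenty commits after the node was loaded
	fmt.Println(cachegen-gen >= cachelimit) // true: unload the clean node
}
```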
// New creates a trie with an existing root node from db.
@ -120,27 +137,25 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
return nil, nil, false, nil
case valueNode:
return n, n, false, nil
case shortNode:
case *shortNode:
if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) {
// key not found in trie
return nil, n, false, nil
}
value, newnode, didResolve, err = t.tryGet(n.Val, key, pos+len(n.Key))
if err == nil && didResolve {
n = n.copy()
n.Val = newnode
return value, n, didResolve, err
} else {
return value, origNode, didResolve, err
}
case fullNode:
child := n.Children[key[pos]]
value, newnode, didResolve, err = t.tryGet(child, key, pos+1)
return value, n, didResolve, err
case *fullNode:
value, newnode, didResolve, err = t.tryGet(n.Children[key[pos]], key, pos+1)
if err == nil && didResolve {
n = n.copy()
n.Children[key[pos]] = newnode
return value, n, didResolve, err
} else {
return value, origNode, didResolve, err
}
return value, n, didResolve, err
case hashNode:
child, err := t.resolveHash(n, key[:pos], key[pos:])
if err != nil {
@ -199,22 +214,19 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
return true, value, nil
}
switch n := n.(type) {
case shortNode:
case *shortNode:
matchlen := prefixLen(key, n.Key)
// If the whole key matches, keep this short node as is
// and only update the value.
if matchlen == len(n.Key) {
dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value)
if err != nil {
return false, nil, err
if !dirty || err != nil {
return false, n, err
}
if !dirty {
return false, n, nil
}
return true, shortNode{n.Key, nn, nil, true}, nil
return true, &shortNode{n.Key, nn, t.newFlag()}, nil
}
// Otherwise branch out at the index where they differ.
branch := fullNode{dirty: true}
branch := &fullNode{flags: t.newFlag()}
var err error
_, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val)
if err != nil {
@ -229,21 +241,19 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
return true, branch, nil
}
// Otherwise, replace it with a short node leading up to the branch.
return true, shortNode{key[:matchlen], branch, nil, true}, nil
return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil
case fullNode:
case *fullNode:
dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value)
if err != nil {
return false, nil, err
if !dirty || err != nil {
return false, n, err
}
if !dirty {
return false, n, nil
}
n.Children[key[0]], n.hash, n.dirty = nn, nil, true
n = n.copy()
n.Children[key[0]], n.flags.hash, n.flags.dirty = nn, nil, true
return true, n, nil
case nil:
return true, shortNode{key, value, nil, true}, nil
return true, &shortNode{key, value, t.newFlag()}, nil
case hashNode:
// We've hit a part of the trie that isn't loaded yet. Load
@ -254,11 +264,8 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
return false, nil, err
}
dirty, nn, err := t.insert(rn, prefix, key, value)
if err != nil {
return false, nil, err
}
if !dirty {
return false, rn, nil
if !dirty || err != nil {
return false, rn, err
}
return true, nn, nil
@ -291,7 +298,7 @@ func (t *Trie) TryDelete(key []byte) error {
// nodes on the way up after deleting recursively.
func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
switch n := n.(type) {
case shortNode:
case *shortNode:
matchlen := prefixLen(key, n.Key)
if matchlen < len(n.Key) {
return false, n, nil // don't replace n on mismatch
@ -304,34 +311,29 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// subtrie must contain at least two other values with keys
// longer than n.Key.
dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):])
if err != nil {
return false, nil, err
}
if !dirty {
return false, n, nil
if !dirty || err != nil {
return false, n, err
}
switch child := child.(type) {
case shortNode:
case *shortNode:
// Deleting from the subtrie reduced it to another
// short node. Merge the nodes to avoid creating a
// shortNode{..., shortNode{...}}. Use concat (which
// always creates a new slice) instead of append to
// avoid modifying n.Key since it might be shared with
// other nodes.
return true, shortNode{concat(n.Key, child.Key...), child.Val, nil, true}, nil
return true, &shortNode{concat(n.Key, child.Key...), child.Val, t.newFlag()}, nil
default:
return true, shortNode{n.Key, child, nil, true}, nil
return true, &shortNode{n.Key, child, t.newFlag()}, nil
}
case fullNode:
case *fullNode:
dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:])
if err != nil {
return false, nil, err
if !dirty || err != nil {
return false, n, err
}
if !dirty {
return false, n, nil
}
n.Children[key[0]], n.hash, n.dirty = nn, nil, true
n = n.copy()
n.Children[key[0]], n.flags.hash, n.flags.dirty = nn, nil, true
// Check how many non-nil entries are left after deleting and
// reduce the full node to a short node if only one entry is
@ -365,14 +367,14 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
if err != nil {
return false, nil, err
}
if cnode, ok := cnode.(shortNode); ok {
if cnode, ok := cnode.(*shortNode); ok {
k := append([]byte{byte(pos)}, cnode.Key...)
return true, shortNode{k, cnode.Val, nil, true}, nil
return true, &shortNode{k, cnode.Val, t.newFlag()}, nil
}
}
// Otherwise, n is replaced by a one-nibble short node
// containing the child.
return true, shortNode{[]byte{byte(pos)}, n.Children[pos], nil, true}, nil
return true, &shortNode{[]byte{byte(pos)}, n.Children[pos], t.newFlag()}, nil
}
// n still contains at least two values and cannot be reduced.
return true, n, nil
@ -392,11 +394,8 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
return false, nil, err
}
dirty, nn, err := t.delete(rn, prefix, key)
if err != nil {
return false, nil, err
}
if !dirty {
return false, rn, nil
if !dirty || err != nil {
return false, rn, err
}
return true, nn, nil
@ -471,6 +470,7 @@ func (t *Trie) CommitTo(db DatabaseWriter) (root common.Hash, err error) {
return (common.Hash{}), err
}
t.root = cached
t.cachegen++
return common.BytesToHash(hash.(hashNode)), nil
}
@ -478,7 +478,7 @@ func (t *Trie) hashRoot(db DatabaseWriter) (node, node, error) {
if t.root == nil {
return hashNode(emptyRoot.Bytes()), nil, nil
}
h := newHasher()
h := newHasher(t.cachegen, t.cachelimit)
defer returnHasherToPool(h)
return h.hash(t.root, db, true)
}

View File

@ -21,6 +21,9 @@ g := metrics.NewGauge()
metrics.Register("bar", g)
g.Update(47)
r := NewRegistry()
g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
h := metrics.NewHistogram(s)
metrics.Register("baz", h)
@ -36,6 +39,15 @@ t.Time(func() {})
t.Update(47)
```
Register() is not threadsafe. For threadsafe metric registration use
GetOrRegister:
```
t := metrics.GetOrRegisterTimer("account.create.latency", nil)
t.Time(func() {})
t.Update(47)
```
Periodically log every metric in human-readable form to standard error:
```go
@ -67,7 +79,7 @@ issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
```go
import "github.com/rcrowley/go-metrics/influxdb"
import "github.com/vrischmann/go-metrics-influxdb"
go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
Host: "127.0.0.1:8086",
@ -137,3 +149,5 @@ Clients are available for the following destinations:
* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato)
* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite)
* InfluxDB - [https://github.com/vrischmann/go-metrics-influxdb](https://github.com/vrischmann/go-metrics-influxdb)
* Ganglia - [https://github.com/appscode/metlia](https://github.com/appscode/metlia)
* Prometheus - [https://github.com/deathowl/go-metrics-prometheus](https://github.com/deathowl/go-metrics-prometheus)

View File

@ -0,0 +1,20 @@
package main
import (
"fmt"
"github.com/rcrowley/go-metrics"
"time"
)
func main() {
r := metrics.NewRegistry()
for i := 0; i < 10000; i++ {
r.Register(fmt.Sprintf("counter-%d", i), metrics.NewCounter())
r.Register(fmt.Sprintf("gauge-%d", i), metrics.NewGauge())
r.Register(fmt.Sprintf("gaugefloat64-%d", i), metrics.NewGaugeFloat64())
r.Register(fmt.Sprintf("histogram-uniform-%d", i), metrics.NewHistogram(metrics.NewUniformSample(1028)))
r.Register(fmt.Sprintf("histogram-exp-%d", i), metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015)))
r.Register(fmt.Sprintf("meter-%d", i), metrics.NewMeter())
}
time.Sleep(600e9)
}

View File

@ -0,0 +1,154 @@
package main
import (
"errors"
"github.com/rcrowley/go-metrics"
// "github.com/rcrowley/go-metrics/stathat"
"log"
"math/rand"
"os"
// "syslog"
"time"
)
const fanout = 10
func main() {
r := metrics.NewRegistry()
c := metrics.NewCounter()
r.Register("foo", c)
for i := 0; i < fanout; i++ {
go func() {
for {
c.Dec(19)
time.Sleep(300e6)
}
}()
go func() {
for {
c.Inc(47)
time.Sleep(400e6)
}
}()
}
g := metrics.NewGauge()
r.Register("bar", g)
for i := 0; i < fanout; i++ {
go func() {
for {
g.Update(19)
time.Sleep(300e6)
}
}()
go func() {
for {
g.Update(47)
time.Sleep(400e6)
}
}()
}
gf := metrics.NewGaugeFloat64()
r.Register("barfloat64", gf)
for i := 0; i < fanout; i++ {
go func() {
for {
gf.Update(19.0)
time.Sleep(300e6)
}
}()
go func() {
for {
gf.Update(47.0)
time.Sleep(400e6)
}
}()
}
hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
if 0 < rand.Intn(2) {
h.Healthy()
} else {
h.Unhealthy(errors.New("baz"))
}
})
r.Register("baz", hc)
s := metrics.NewExpDecaySample(1028, 0.015)
//s := metrics.NewUniformSample(1028)
h := metrics.NewHistogram(s)
r.Register("bang", h)
for i := 0; i < fanout; i++ {
go func() {
for {
h.Update(19)
time.Sleep(300e6)
}
}()
go func() {
for {
h.Update(47)
time.Sleep(400e6)
}
}()
}
m := metrics.NewMeter()
r.Register("quux", m)
for i := 0; i < fanout; i++ {
go func() {
for {
m.Mark(19)
time.Sleep(300e6)
}
}()
go func() {
for {
m.Mark(47)
time.Sleep(400e6)
}
}()
}
t := metrics.NewTimer()
r.Register("hooah", t)
for i := 0; i < fanout; i++ {
go func() {
for {
t.Time(func() { time.Sleep(300e6) })
}
}()
go func() {
for {
t.Time(func() { time.Sleep(400e6) })
}
}()
}
metrics.RegisterDebugGCStats(r)
go metrics.CaptureDebugGCStats(r, 5e9)
metrics.RegisterRuntimeMemStats(r)
go metrics.CaptureRuntimeMemStats(r, 5e9)
metrics.Log(r, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
/*
w, err := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
if nil != err { log.Fatalln(err) }
metrics.Syslog(r, 60e9, w)
*/
/*
addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
metrics.Graphite(r, 10e9, "metrics", addr)
*/
/*
stathat.Stathat(r, 10e9, "example@example.com")
*/
}

View File

@ -0,0 +1,22 @@
package main
import (
"log"
"net"
)
func main() {
addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
l, err := net.ListenTCP("tcp", addr)
if nil != err {
log.Fatalln(err)
}
log.Println("listening", l.Addr())
for {
c, err := l.AcceptTCP()
if nil != err {
log.Fatalln(err)
}
log.Println("accepted", c.RemoteAddr())
}
}

vendor/github.com/rcrowley/go-metrics/exp/exp.go
View File

@ -0,0 +1,156 @@
// Hook go-metrics into expvar
// on any /debug/metrics request, load all vars from the registry into expvar, and execute regular expvar handler
package exp
import (
"expvar"
"fmt"
"net/http"
"sync"
"github.com/rcrowley/go-metrics"
)
type exp struct {
expvarLock sync.Mutex // expvar panics if you try to register the same var twice, so we must probe it safely
registry metrics.Registry
}
func (exp *exp) expHandler(w http.ResponseWriter, r *http.Request) {
// load our variables into expvar
exp.syncToExpvar()
// now just run the official expvar handler code (which is not publicly callable, so pasted inline)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
// Exp will register an expvar powered metrics handler with http.DefaultServeMux on "/debug/vars"
func Exp(r metrics.Registry) {
h := ExpHandler(r)
// this would cause a panic:
// panic: http: multiple registrations for /debug/vars
// http.HandleFunc("/debug/vars", e.expHandler)
// haven't found an elegant way, so just use a different endpoint
http.Handle("/debug/metrics", h)
}
// ExpHandler will return an expvar powered metrics handler.
func ExpHandler(r metrics.Registry) http.Handler {
e := exp{sync.Mutex{}, r}
return http.HandlerFunc(e.expHandler)
}
func (exp *exp) getInt(name string) *expvar.Int {
var v *expvar.Int
exp.expvarLock.Lock()
p := expvar.Get(name)
if p != nil {
v = p.(*expvar.Int)
} else {
v = new(expvar.Int)
expvar.Publish(name, v)
}
exp.expvarLock.Unlock()
return v
}
func (exp *exp) getFloat(name string) *expvar.Float {
var v *expvar.Float
exp.expvarLock.Lock()
p := expvar.Get(name)
if p != nil {
v = p.(*expvar.Float)
} else {
v = new(expvar.Float)
expvar.Publish(name, v)
}
exp.expvarLock.Unlock()
return v
}
func (exp *exp) publishCounter(name string, metric metrics.Counter) {
v := exp.getInt(name)
v.Set(metric.Count())
}
func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
v := exp.getInt(name)
v.Set(metric.Value())
}
func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
exp.getFloat(name).Set(metric.Value())
}
func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
exp.getInt(name + ".count").Set(h.Count())
exp.getFloat(name + ".min").Set(float64(h.Min()))
exp.getFloat(name + ".max").Set(float64(h.Max()))
exp.getFloat(name + ".mean").Set(float64(h.Mean()))
exp.getFloat(name + ".std-dev").Set(float64(h.StdDev()))
exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
}
func (exp *exp) publishMeter(name string, metric metrics.Meter) {
m := metric.Snapshot()
exp.getInt(name + ".count").Set(m.Count())
exp.getFloat(name + ".one-minute").Set(float64(m.Rate1()))
exp.getFloat(name + ".five-minute").Set(float64(m.Rate5()))
exp.getFloat(name + ".fifteen-minute").Set(float64((m.Rate15())))
exp.getFloat(name + ".mean").Set(float64(m.RateMean()))
}
func (exp *exp) publishTimer(name string, metric metrics.Timer) {
t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
exp.getInt(name + ".count").Set(t.Count())
exp.getFloat(name + ".min").Set(float64(t.Min()))
exp.getFloat(name + ".max").Set(float64(t.Max()))
exp.getFloat(name + ".mean").Set(float64(t.Mean()))
exp.getFloat(name + ".std-dev").Set(float64(t.StdDev()))
exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
exp.getFloat(name + ".one-minute").Set(float64(t.Rate1()))
exp.getFloat(name + ".five-minute").Set(float64(t.Rate5()))
exp.getFloat(name + ".fifteen-minute").Set(float64((t.Rate15())))
exp.getFloat(name + ".mean-rate").Set(float64(t.RateMean()))
}
func (exp *exp) syncToExpvar() {
exp.registry.Each(func(name string, i interface{}) {
switch i.(type) {
case metrics.Counter:
exp.publishCounter(name, i.(metrics.Counter))
case metrics.Gauge:
exp.publishGauge(name, i.(metrics.Gauge))
case metrics.GaugeFloat64:
exp.publishGaugeFloat64(name, i.(metrics.GaugeFloat64))
case metrics.Histogram:
exp.publishHistogram(name, i.(metrics.Histogram))
case metrics.Meter:
exp.publishMeter(name, i.(metrics.Meter))
case metrics.Timer:
exp.publishTimer(name, i.(metrics.Timer))
default:
panic(fmt.Sprintf("unsupported type for '%s': %T", name, i))
}
})
}
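A hedged usage sketch (the port and metric name are illustrative): register some metrics, mount the handler, and serve:

```go
package main

import (
	"net/http"

	"github.com/rcrowley/go-metrics"
	"github.com/rcrowley/go-metrics/exp"
)

func main() {
	c := metrics.NewRegisteredCounter("requests", metrics.DefaultRegistry)
	c.Inc(1)

	// mounts on /debug/metrics (not /debug/vars, which expvar owns)
	exp.Exp(metrics.DefaultRegistry)
	http.ListenAndServe(":8080", nil)
}
```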

View File

@ -36,6 +36,25 @@ func NewRegisteredGauge(name string, r Registry) Gauge {
return c
}
// NewFunctionalGauge constructs a new FunctionalGauge.
func NewFunctionalGauge(f func() int64) Gauge {
if UseNilMetrics {
return NilGauge{}
}
return &FunctionalGauge{value: f}
}
// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
c := NewFunctionalGauge(f)
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// GaugeSnapshot is a read-only copy of another Gauge.
type GaugeSnapshot int64
@ -82,3 +101,20 @@ func (g *StandardGauge) Update(v int64) {
func (g *StandardGauge) Value() int64 {
return atomic.LoadInt64(&g.value)
}
// FunctionalGauge returns value from given function
type FunctionalGauge struct {
value func() int64
}
// Value returns the gauge's current value.
func (g FunctionalGauge) Value() int64 {
return g.value()
}
// Snapshot returns the snapshot.
func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
// Update panics.
func (FunctionalGauge) Update(int64) {
panic("Update called on a FunctionalGauge")
}
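A short usage sketch (the queue channel is illustrative): a functional gauge evaluates its closure on every read instead of storing updates, which is why Update panics:

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	queue := make(chan int, 100)
	queue <- 1
	queue <- 2

	// the closure is re-evaluated on every Value() call
	g := metrics.NewRegisteredFunctionalGauge("queue.depth", nil, func() int64 {
		return int64(len(queue))
	})
	fmt.Println(g.Value()) // 2
}
```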

View File

@ -38,6 +38,24 @@ func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
return c
}
// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
if UseNilMetrics {
return NilGaugeFloat64{}
}
return &FunctionalGaugeFloat64{value: f}
}
// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
c := NewFunctionalGaugeFloat64(f)
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
type GaugeFloat64Snapshot float64
@ -89,3 +107,21 @@ func (g *StandardGaugeFloat64) Value() float64 {
defer g.mutex.Unlock()
return g.value
}
// FunctionalGaugeFloat64 returns value from given function
type FunctionalGaugeFloat64 struct {
value func() float64
}
// Value returns the gauge's current value.
func (g FunctionalGaugeFloat64) Value() float64 {
return g.value()
}
// Snapshot returns the snapshot.
func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
// Update panics.
func (FunctionalGaugeFloat64) Update(float64) {
panic("Update called on a FunctionalGaugeFloat64")
}

View File

@ -81,3 +81,7 @@ func WriteJSON(r Registry, d time.Duration, w io.Writer) {
func WriteJSONOnce(r Registry, w io.Writer) {
json.NewEncoder(w).Encode(r)
}
func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
return json.Marshal(p.underlying)
}

vendor/github.com/rcrowley/go-metrics/librato/client.go
View File

@ -0,0 +1,102 @@
package librato
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
const Operations = "operations"
const OperationsShort = "ops"
type LibratoClient struct {
Email, Token string
}
// property strings
const (
// display attributes
Color = "color"
DisplayMax = "display_max"
DisplayMin = "display_min"
DisplayUnitsLong = "display_units_long"
DisplayUnitsShort = "display_units_short"
DisplayStacked = "display_stacked"
DisplayTransform = "display_transform"
// special gauge display attributes
SummarizeFunction = "summarize_function"
Aggregate = "aggregate"
// metric keys
Name = "name"
Period = "period"
Description = "description"
DisplayName = "display_name"
Attributes = "attributes"
// measurement keys
MeasureTime = "measure_time"
Source = "source"
Value = "value"
// special gauge keys
Count = "count"
Sum = "sum"
Max = "max"
Min = "min"
SumSquares = "sum_squares"
// batch keys
Counters = "counters"
Gauges = "gauges"
MetricsPostUrl = "https://metrics-api.librato.com/v1/metrics"
)
type Measurement map[string]interface{}
type Metric map[string]interface{}
type Batch struct {
Gauges []Measurement `json:"gauges,omitempty"`
Counters []Measurement `json:"counters,omitempty"`
MeasureTime int64 `json:"measure_time"`
Source string `json:"source"`
}
func (self *LibratoClient) PostMetrics(batch Batch) (err error) {
var (
js []byte
req *http.Request
resp *http.Response
)
if len(batch.Counters) == 0 && len(batch.Gauges) == 0 {
return nil
}
if js, err = json.Marshal(batch); err != nil {
return
}
if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil {
return
}
req.Header.Set("Content-Type", "application/json")
req.SetBasicAuth(self.Email, self.Token)
if resp, err = http.DefaultClient.Do(req); err != nil {
return
}
if resp.StatusCode != http.StatusOK {
var body []byte
if body, err = ioutil.ReadAll(resp.Body); err != nil {
body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err))
}
err = fmt.Errorf("Unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
}
return
}

View File

@ -0,0 +1,235 @@
package librato
import (
"fmt"
"log"
"math"
"regexp"
"time"
"github.com/rcrowley/go-metrics"
)
// a regexp for extracting the unit from time.Duration.String
var unitRegexp = regexp.MustCompile("[^\\d]+$")
// a helper that turns a time.Duration into librato display attributes for timer metrics
func translateTimerAttributes(d time.Duration) (attrs map[string]interface{}) {
attrs = make(map[string]interface{})
attrs[DisplayTransform] = fmt.Sprintf("x/%d", int64(d))
attrs[DisplayUnitsShort] = string(unitRegexp.Find([]byte(d.String())))
return
}
type Reporter struct {
Email, Token string
Namespace string
Source string
Interval time.Duration
Registry metrics.Registry
Percentiles []float64 // percentiles to report on histogram metrics
TimerAttributes map[string]interface{} // units in which timers will be displayed
intervalSec int64
}
func NewReporter(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) *Reporter {
return &Reporter{e, t, "", s, d, r, p, translateTimerAttributes(u), int64(d / time.Second)}
}
func Librato(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) {
NewReporter(r, d, e, t, s, p, u).Run()
}
func (self *Reporter) Run() {
log.Printf("WARNING: This client has been DEPRECATED! It has been moved to https://github.com/mihasya/go-metrics-librato and will be removed from rcrowley/go-metrics on August 5th 2015")
ticker := time.Tick(self.Interval)
metricsApi := &LibratoClient{self.Email, self.Token}
for now := range ticker {
var metrics Batch
var err error
if metrics, err = self.BuildRequest(now, self.Registry); err != nil {
log.Printf("ERROR constructing librato request body %s", err)
continue
}
if err := metricsApi.PostMetrics(metrics); err != nil {
log.Printf("ERROR sending metrics to librato %s", err)
continue
}
}
}
// calculate sum of squares from data provided by metrics.Histogram
// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
func sumSquares(s metrics.Sample) float64 {
count := float64(s.Count())
sumSquared := math.Pow(count*s.Mean(), 2)
sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
if math.IsNaN(sumSquares) {
return 0.0
}
return sumSquares
}
func sumSquaresTimer(t metrics.Timer) float64 {
count := float64(t.Count())
sumSquared := math.Pow(count*t.Mean(), 2)
sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
if math.IsNaN(sumSquares) {
return 0.0
}
return sumSquares
}
func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) {
snapshot = Batch{
// coerce timestamps to a stepping fn so that they line up in Librato graphs
MeasureTime: (now.Unix() / self.intervalSec) * self.intervalSec,
Source: self.Source,
}
snapshot.Gauges = make([]Measurement, 0)
snapshot.Counters = make([]Measurement, 0)
histogramGaugeCount := 1 + len(self.Percentiles)
r.Each(func(name string, metric interface{}) {
if self.Namespace != "" {
name = fmt.Sprintf("%s.%s", self.Namespace, name)
}
measurement := Measurement{}
measurement[Period] = self.Interval.Seconds()
switch m := metric.(type) {
case metrics.Counter:
if m.Count() > 0 {
measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
measurement[Value] = float64(m.Count())
measurement[Attributes] = map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
DisplayMin: "0",
}
snapshot.Counters = append(snapshot.Counters, measurement)
}
case metrics.Gauge:
measurement[Name] = name
measurement[Value] = float64(m.Value())
snapshot.Gauges = append(snapshot.Gauges, measurement)
case metrics.GaugeFloat64:
measurement[Name] = name
measurement[Value] = float64(m.Value())
snapshot.Gauges = append(snapshot.Gauges, measurement)
case metrics.Histogram:
if m.Count() > 0 {
gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
s := m.Sample()
measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
measurement[Count] = uint64(s.Count())
measurement[Max] = float64(s.Max())
measurement[Min] = float64(s.Min())
measurement[Sum] = float64(s.Sum())
measurement[SumSquares] = sumSquares(s)
gauges[0] = measurement
for i, p := range self.Percentiles {
gauges[i+1] = Measurement{
Name: fmt.Sprintf("%s.%.2f", measurement[Name], p),
Value: s.Percentile(p),
Period: measurement[Period],
}
}
snapshot.Gauges = append(snapshot.Gauges, gauges...)
}
case metrics.Meter:
measurement[Name] = name
measurement[Value] = float64(m.Count())
snapshot.Counters = append(snapshot.Counters, measurement)
snapshot.Gauges = append(snapshot.Gauges,
Measurement{
Name: fmt.Sprintf("%s.%s", name, "1min"),
Value: m.Rate1(),
Period: int64(self.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
DisplayMin: "0",
},
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "5min"),
Value: m.Rate5(),
Period: int64(self.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
DisplayMin: "0",
},
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "15min"),
Value: m.Rate15(),
Period: int64(self.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
DisplayMin: "0",
},
},
)
case metrics.Timer:
measurement[Name] = name
measurement[Value] = float64(m.Count())
snapshot.Counters = append(snapshot.Counters, measurement)
if m.Count() > 0 {
libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
gauges[0] = Measurement{
Name: libratoName,
Count: uint64(m.Count()),
Sum: m.Mean() * float64(m.Count()),
Max: float64(m.Max()),
Min: float64(m.Min()),
SumSquares: sumSquaresTimer(m),
Period: int64(self.Interval.Seconds()),
Attributes: self.TimerAttributes,
}
for i, p := range self.Percentiles {
gauges[i+1] = Measurement{
Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100),
Value: m.Percentile(p),
Period: int64(self.Interval.Seconds()),
Attributes: self.TimerAttributes,
}
}
snapshot.Gauges = append(snapshot.Gauges, gauges...)
snapshot.Gauges = append(snapshot.Gauges,
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.1min"),
Value: m.Rate1(),
Period: int64(self.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
DisplayMin: "0",
},
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.5min"),
Value: m.Rate5(),
Period: int64(self.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
DisplayMin: "0",
},
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.15min"),
Value: m.Rate15(),
Period: int64(self.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
DisplayMin: "0",
},
},
)
}
}
})
return
}

View File

@ -1,17 +1,20 @@
package metrics
import (
"log"
"time"
)
func Log(r Registry, freq time.Duration, l *log.Logger) {
type Logger interface {
Printf(format string, v ...interface{})
}
func Log(r Registry, freq time.Duration, l Logger) {
LogScaled(r, freq, time.Nanosecond, l)
}
// Output each metric in the given registry periodically using the given
// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
func LogScaled(r Registry, freq time.Duration, scale time.Duration, l *log.Logger) {
func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
du := float64(scale)
duSuffix := scale.String()[1:]

View File

@ -3,6 +3,7 @@ package metrics
import (
"fmt"
"reflect"
"strings"
"sync"
)
@ -166,12 +167,34 @@ func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
// Call the given function for each registered metric.
func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
r.underlying.Each(fn)
wrappedFn := func(prefix string) func(string, interface{}) {
return func(name string, iface interface{}) {
if strings.HasPrefix(name, prefix) {
fn(name, iface)
}
}
}
baseRegistry, prefix := findPrefix(r, "")
baseRegistry.Each(wrappedFn(prefix))
}
func findPrefix(registry Registry, prefix string) (Registry, string) {
switch r := registry.(type) {
case *PrefixedRegistry:
return findPrefix(r.underlying, r.prefix + prefix)
case *StandardRegistry:
return r, prefix
}
return nil, ""
}
// Get the metric by the given name or nil if none is registered.
func (r *PrefixedRegistry) Get(name string) interface{} {
return r.underlying.Get(name)
realName := r.prefix + name
return r.underlying.Get(realName)
}
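A hedged sketch of the corrected behaviour (the names are illustrative): Each now walks down to the base registry and filters by the accumulated prefix, and Get resolves the prefixed name:

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	root := metrics.NewRegistry()
	web := metrics.NewPrefixedChildRegistry(root, "web.")

	metrics.NewRegisteredCounter("hits", web)   // stored in root as "web.hits"
	metrics.NewRegisteredCounter("other", root) // no prefix

	web.Each(func(name string, m interface{}) {
		fmt.Println(name) // visits only "web.hits"
	})
	fmt.Println(web.Get("hits") != nil) // true: Get prepends "web."
}
```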
// Gets an existing metric or registers the given one.

View File

@ -0,0 +1,69 @@
// Metrics output to StatHat.
package stathat
import (
"github.com/rcrowley/go-metrics"
"github.com/stathat/go"
"log"
"time"
)
func Stathat(r metrics.Registry, d time.Duration, userkey string) {
for {
if err := sh(r, userkey); nil != err {
log.Println(err)
}
time.Sleep(d)
}
}
func sh(r metrics.Registry, userkey string) error {
r.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case metrics.Counter:
stathat.PostEZCount(name, userkey, int(metric.Count()))
case metrics.Gauge:
stathat.PostEZValue(name, userkey, float64(metric.Value()))
case metrics.GaugeFloat64:
stathat.PostEZValue(name, userkey, float64(metric.Value()))
case metrics.Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
stathat.PostEZCount(name+".count", userkey, int(h.Count()))
stathat.PostEZValue(name+".min", userkey, float64(h.Min()))
stathat.PostEZValue(name+".max", userkey, float64(h.Max()))
stathat.PostEZValue(name+".mean", userkey, float64(h.Mean()))
stathat.PostEZValue(name+".std-dev", userkey, float64(h.StdDev()))
stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
case metrics.Meter:
m := metric.Snapshot()
stathat.PostEZCount(name+".count", userkey, int(m.Count()))
stathat.PostEZValue(name+".one-minute", userkey, float64(m.Rate1()))
stathat.PostEZValue(name+".five-minute", userkey, float64(m.Rate5()))
stathat.PostEZValue(name+".fifteen-minute", userkey, float64(m.Rate15()))
stathat.PostEZValue(name+".mean", userkey, float64(m.RateMean()))
case metrics.Timer:
t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
stathat.PostEZCount(name+".count", userkey, int(t.Count()))
stathat.PostEZValue(name+".min", userkey, float64(t.Min()))
stathat.PostEZValue(name+".max", userkey, float64(t.Max()))
stathat.PostEZValue(name+".mean", userkey, float64(t.Mean()))
stathat.PostEZValue(name+".std-dev", userkey, float64(t.StdDev()))
stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
stathat.PostEZValue(name+".one-minute", userkey, float64(t.Rate1()))
stathat.PostEZValue(name+".five-minute", userkey, float64(t.Rate5()))
stathat.PostEZValue(name+".fifteen-minute", userkey, float64(t.Rate15()))
stathat.PostEZValue(name+".mean-rate", userkey, float64(t.RateMean()))
}
})
return nil
}