update go-ethereum to 1.8.17

This commit is contained in:
b00ris 2018-10-28 12:05:51 +03:00
parent 14e1bbfd9b
commit 64c74e62c1
226 changed files with 8624 additions and 5017 deletions

Gopkg.lock generated

@ -26,11 +26,12 @@
version = "v1.7.1" version = "v1.7.1"
[[projects]] [[projects]]
digest = "1:1dc638d013f2a6f6ae13568584e8fee7e172cbf295e82064ded18bcf6d63c04c" digest = "1:7957b1d07e06b705902c0187bb2b20eaf04cae8e00d170f2b15e0a525bc2b52b"
name = "github.com/ethereum/go-ethereum" name = "github.com/ethereum/go-ethereum"
packages = [ packages = [
".", ".",
"common", "common",
"common/bitutil",
"common/hexutil", "common/hexutil",
"common/math", "common/math",
"common/mclock", "common/mclock",
@ -47,18 +48,19 @@
"p2p", "p2p",
"p2p/discover", "p2p/discover",
"p2p/discv5", "p2p/discv5",
"p2p/enode",
"p2p/enr",
"p2p/nat", "p2p/nat",
"p2p/netutil", "p2p/netutil",
"params", "params",
"rlp", "rlp",
"rpc", "rpc",
"trie", "trie",
"whisper/whisperv6",
] ]
pruneopts = "T" pruneopts = "T"
revision = "31f0afca344c53678e98032d6306e0e4ff7b9be1" revision = "b193c5ce7a45b2d1423e722cd3cf6c4409cb4f03"
source = "github.com/status-im/go-ethereum" source = "github.com/status-im/go-ethereum"
version = "v1.8.16" version = "v1.8.17"
[[projects]] [[projects]]
digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d" digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d"
@ -208,10 +210,10 @@
"github.com/ethereum/go-ethereum/metrics", "github.com/ethereum/go-ethereum/metrics",
"github.com/ethereum/go-ethereum/p2p", "github.com/ethereum/go-ethereum/p2p",
"github.com/ethereum/go-ethereum/p2p/discover", "github.com/ethereum/go-ethereum/p2p/discover",
"github.com/ethereum/go-ethereum/p2p/enode",
"github.com/ethereum/go-ethereum/p2p/nat", "github.com/ethereum/go-ethereum/p2p/nat",
"github.com/ethereum/go-ethereum/rlp", "github.com/ethereum/go-ethereum/rlp",
"github.com/ethereum/go-ethereum/rpc", "github.com/ethereum/go-ethereum/rpc",
"github.com/ethereum/go-ethereum/whisper/whisperv6",
"github.com/syndtr/goleveldb/leveldb/errors", "github.com/syndtr/goleveldb/leveldb/errors",
"golang.org/x/crypto/pbkdf2", "golang.org/x/crypto/pbkdf2",
"golang.org/x/sync/syncmap", "golang.org/x/sync/syncmap",


@ -30,7 +30,7 @@
[[constraint]] [[constraint]]
name = "github.com/ethereum/go-ethereum" name = "github.com/ethereum/go-ethereum"
version = "=v1.8.16" version = ">v1.8.16"
source = "github.com/status-im/go-ethereum" source = "github.com/status-im/go-ethereum"
[[constraint]] [[constraint]]


@ -27,6 +27,6 @@ swarm/services @zelig
swarm/state @justelad swarm/state @justelad
swarm/storage/encryption @gbalint @zelig @nagydani swarm/storage/encryption @gbalint @zelig @nagydani
swarm/storage/mock @janos swarm/storage/mock @janos
swarm/storage/mru @nolash swarm/storage/feed @nolash @jpeletier
swarm/testutil @lmars swarm/testutil @lmars
whisper/ @gballet @gluk256 whisper/ @gballet @gluk256


@ -1,16 +1,40 @@
# Contributing
Thank you for considering to help out with the source code! We welcome
contributions from anyone on the internet, and are grateful for even the
smallest of fixes!
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a
pull request for the maintainers to review and merge into the main code base. If
you wish to submit more complex changes though, please check up with the core
devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) to
ensure those changes are in line with the general philosophy of the project
and/or get some early feedback which can make both your efforts much lighter as
well as our review and merge procedures quick and simple.
## Coding guidelines
Please make sure your contributions adhere to our coding guidelines:
* Code must adhere to the official Go
[formatting](https://golang.org/doc/effective_go.html#formatting) guidelines
(i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go
[commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Pull requests need to be based on and opened against the `master` branch.
* Commit messages should be prefixed with the package(s) they modify.
* E.g. "eth, rpc: make trace configs optional"
## Can I have feature X ## Can I have feature X
Before you do a feature request please check and make sure that it isn't possible Before you submit a feature request, please check and make sure that it isn't
through some other means. The JavaScript enabled console is a powerful feature possible through some other means. The JavaScript-enabled console is a powerful
in the right hands. Please check our [Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info feature in the right hands. Please check our
[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
and help. and help.
## Contributing ## Configuration, dependencies, and tests
If you'd like to contribute to go-ethereum please fork, fix, commit and Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
send a pull request. Commits which do not comply with the coding standards for more details on configuring your environment, managing project dependencies
are ignored (use gofmt!). and testing procedures.
See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, testing, and
dependency management.


@ -3,17 +3,6 @@ go_import_path: github.com/ethereum/go-ethereum
sudo: false sudo: false
matrix: matrix:
include: include:
- os: linux
dist: trusty
sudo: required
go: 1.9.x
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
- os: linux - os: linux
dist: trusty dist: trusty
sudo: required sudo: required
@ -56,7 +45,8 @@ matrix:
- go run build/ci.go lint - go run build/ci.go lint
# This builder does the Ubuntu PPA upload # This builder does the Ubuntu PPA upload
- os: linux - if: type = push
os: linux
dist: trusty dist: trusty
go: 1.11.x go: 1.11.x
env: env:
@ -74,7 +64,8 @@ matrix:
- go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum - go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
# This builder does the Linux Azure uploads # This builder does the Linux Azure uploads
- os: linux - if: type = push
os: linux
dist: trusty dist: trusty
sudo: required sudo: required
go: 1.11.x go: 1.11.x
@ -107,7 +98,8 @@ matrix:
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
# This builder does the Linux Azure MIPS xgo uploads # This builder does the Linux Azure MIPS xgo uploads
- os: linux - if: type = push
os: linux
dist: trusty dist: trusty
services: services:
- docker - docker
@ -134,7 +126,8 @@ matrix:
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
# This builder does the Android Maven and Azure uploads # This builder does the Android Maven and Azure uploads
- os: linux - if: type = push
os: linux
dist: trusty dist: trusty
addons: addons:
apt: apt:
@ -155,7 +148,7 @@ matrix:
git: git:
submodules: false # avoid cloning ethereum/tests submodules: false # avoid cloning ethereum/tests
before_install: before_install:
- curl https://storage.googleapis.com/golang/go1.11.linux-amd64.tar.gz | tar -xz - curl https://storage.googleapis.com/golang/go1.11.1.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH - export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go - export GOROOT=`pwd`/go
- export GOPATH=$HOME/go - export GOPATH=$HOME/go
@ -171,7 +164,8 @@ matrix:
- go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx - if: type = push
os: osx
go: 1.11.x go: 1.11.x
env: env:
- azure-osx - azure-osx
@ -199,7 +193,8 @@ matrix:
- go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds
# This builder does the Azure archive purges to avoid accumulating junk # This builder does the Azure archive purges to avoid accumulating junk
- os: linux - if: type = cron
os: linux
dist: trusty dist: trusty
go: 1.11.x go: 1.11.x
env: env:
@ -208,10 +203,3 @@ matrix:
submodules: false # avoid cloning ethereum/tests submodules: false # avoid cloning ethereum/tests
script: script:
- go run build/ci.go purge -store gethstore/builds -days 14 - go run build/ci.go purge -store gethstore/builds -days 14
notifications:
webhooks:
urls:
- https://webhooks.gitter.im/e/e09ccdce1048c5e03445
on_success: change
on_failure: always


@ -57,6 +57,9 @@ devtools:
@type "solc" 2> /dev/null || echo 'Please install solc' @type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc' @type "protoc" 2> /dev/null || echo 'Please install protoc'
swarm-devtools:
env GOBIN= go install ./cmd/swarm/mimegen
# Cross Compilation Targets (xgo) # Cross Compilation Targets (xgo)
geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios


@ -1,8 +1,8 @@
diff --git c/p2p/peer.go w/p2p/peer.go diff --git a/p2p/peer.go b/p2p/peer.go
index 73e33418e..322268b28 100644 index af019d0..cfd63af 100644
--- c/p2p/peer.go --- a/p2p/peer.go
+++ w/p2p/peer.go +++ b/p2p/peer.go
@@ -22,6 +22,7 @@ import ( @@ -23,6 +23,7 @@ import (
"net" "net"
"sort" "sort"
"sync" "sync"
@ -10,7 +10,7 @@ index 73e33418e..322268b28 100644
"time" "time"
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
@@ -38,7 +39,10 @@ const ( @@ -44,7 +45,10 @@ const (
snappyProtocolVersion = 5 snappyProtocolVersion = 5
@ -22,7 +22,7 @@ index 73e33418e..322268b28 100644
) )
const ( const (
@@ -100,6 +104,7 @@ type Peer struct { @@ -106,6 +110,7 @@ type Peer struct {
log log.Logger log log.Logger
created mclock.AbsTime created mclock.AbsTime
@ -30,7 +30,7 @@ index 73e33418e..322268b28 100644
wg sync.WaitGroup wg sync.WaitGroup
protoErr chan error protoErr chan error
closed chan struct{} closed chan struct{}
@@ -118,6 +123,11 @@ func NewPeer(id discover.NodeID, name string, caps []Cap) *Peer { @@ -125,6 +130,11 @@ func NewPeer(id enode.ID, name string, caps []Cap) *Peer {
return peer return peer
} }
@ -40,9 +40,9 @@ index 73e33418e..322268b28 100644
+} +}
+ +
// ID returns the node's public key. // ID returns the node's public key.
func (p *Peer) ID() discover.NodeID { func (p *Peer) ID() enode.ID {
return p.rw.id return p.rw.node.ID()
@@ -188,8 +198,10 @@ func (p *Peer) run() (remoteRequested bool, err error) { @@ -201,8 +211,10 @@ func (p *Peer) run() (remoteRequested bool, err error) {
readErr = make(chan error, 1) readErr = make(chan error, 1)
reason DiscReason // sent to the peer reason DiscReason // sent to the peer
) )
@ -55,7 +55,7 @@ index 73e33418e..322268b28 100644
go p.pingLoop() go p.pingLoop()
// Start all protocol handlers. // Start all protocol handlers.
@@ -248,7 +260,24 @@ func (p *Peer) pingLoop() { @@ -262,7 +274,24 @@ func (p *Peer) pingLoop() {
} }
} }
@ -81,7 +81,7 @@ index 73e33418e..322268b28 100644
defer p.wg.Done() defer p.wg.Done()
for { for {
msg, err := p.rw.ReadMsg() msg, err := p.rw.ReadMsg()
@@ -261,6 +290,7 @@ func (p *Peer) readLoop(errc chan<- error) { @@ -275,6 +304,7 @@ func (p *Peer) readLoop(errc chan<- error) {
errc <- err errc <- err
return return
} }
@ -89,11 +89,11 @@ index 73e33418e..322268b28 100644
} }
} }
diff --git c/p2p/server.go w/p2p/server.go diff --git a/p2p/server.go b/p2p/server.go
index c41d1dc15..04c6f7147 100644 index 40db758..8546b02 100644
--- c/p2p/server.go --- a/p2p/server.go
+++ w/p2p/server.go +++ b/p2p/server.go
@@ -45,7 +45,7 @@ const ( @@ -49,7 +49,7 @@ const (
// Maximum time allowed for reading a complete message. // Maximum time allowed for reading a complete message.
// This is effectively the amount of time a connection can be idle. // This is effectively the amount of time a connection can be idle.
@ -102,11 +102,11 @@ index c41d1dc15..04c6f7147 100644
// Maximum amount of time allowed for writing a complete message. // Maximum amount of time allowed for writing a complete message.
frameWriteTimeout = 20 * time.Second frameWriteTimeout = 20 * time.Second
diff --git c/whisper/whisperv6/peer.go w/whisper/whisperv6/peer.go diff --git a/whisper/whisperv6/peer.go b/whisper/whisperv6/peer.go
index 427127290..c30e92d1c 100644 index eb17d2d..2b7687e 100644
--- c/whisper/whisperv6/peer.go --- a/whisper/whisperv6/peer.go
+++ w/whisper/whisperv6/peer.go +++ b/whisper/whisperv6/peer.go
@@ -187,6 +187,10 @@ func (peer *Peer) expire() { @@ -195,6 +195,10 @@ func (peer *Peer) expire() {
// broadcast iterates over the collection of envelopes and transmits yet unknown // broadcast iterates over the collection of envelopes and transmits yet unknown
// ones over the network. // ones over the network.
func (peer *Peer) broadcast() error { func (peer *Peer) broadcast() error {


@ -35,7 +35,7 @@ index e03ec9d..1665539 100644
- "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/discover"
+ "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/enode"
) )
// EventType used to define known envelope events. // EventType used to define known envelope events.
@ -63,7 +63,7 @@ index e03ec9d..1665539 100644
- Peer discover.NodeID - Peer discover.NodeID
+ Event EventType + Event EventType
+ Hash common.Hash + Hash common.Hash
+ Peer discover.NodeID + Peer enode.ID
} }
diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go
index 697f0ec..4a7b006 100644 index 697f0ec..4a7b006 100644


@ -20,7 +20,7 @@ index 1665539d6..fe7570ed5 100644
@@ -24,4 +28,5 @@ type EnvelopeEvent struct { @@ -24,4 +28,5 @@ type EnvelopeEvent struct {
Event EventType Event EventType
Hash common.Hash Hash common.Hash
Peer discover.NodeID Peer enode.ID
+ Data interface{} + Data interface{}
} }
diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go

File diff suppressed because it is too large.


@ -137,6 +137,9 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
// MethodById looks up a method by the 4-byte id // MethodById looks up a method by the 4-byte id
// returns nil if none found // returns nil if none found
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) { func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
if len(sigdata) < 4 {
return nil, fmt.Errorf("data too short (% bytes) for abi method lookup", len(sigdata))
}
for _, method := range abi.Methods { for _, method := range abi.Methods {
if bytes.Equal(method.Id(), sigdata[:4]) { if bytes.Equal(method.Id(), sigdata[:4]) {
return &method, nil return &method, nil


@ -208,7 +208,7 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
} }
// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated // SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
// chain doens't have miners, we just return a gas price of 1 for any call. // chain doesn't have miners, we just return a gas price of 1 for any call.
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) { func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
return big.NewInt(1), nil return big.NewInt(1), nil
} }
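A small illustration of the behaviour the corrected comment describes, assuming the `NewSimulatedBackend` constructor of this go-ethereum version takes a genesis allocation and a gas limit; the values used are arbitrary.

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Sketch only: the simulated chain has no miners, so SuggestGasPrice
	// always reports 1 wei regardless of state.
	backend := backends.NewSimulatedBackend(core.GenesisAlloc{}, 8000000)
	price, err := backend.SuggestGasPrice(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(price) // 1
}
```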


@ -23,13 +23,13 @@ package bind
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"go/format"
"regexp" "regexp"
"strings" "strings"
"text/template" "text/template"
"unicode" "unicode"
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi"
"golang.org/x/tools/imports"
) )
// Lang is a target programming language selector to generate bindings for. // Lang is a target programming language selector to generate bindings for.
@ -145,9 +145,9 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
if err := tmpl.Execute(buffer, data); err != nil { if err := tmpl.Execute(buffer, data); err != nil {
return "", err return "", err
} }
// For Go bindings pass the code through goimports to clean it up and double check // For Go bindings pass the code through gofmt to clean it up
if lang == LangGo { if lang == LangGo {
code, err := imports.Process(".", buffer.Bytes(), nil) code, err := format.Source(buffer.Bytes())
if err != nil { if err != nil {
return "", fmt.Errorf("%v\n%s", err, buffer) return "", fmt.Errorf("%v\n%s", err, buffer)
} }


@ -64,6 +64,30 @@ const tmplSourceGo = `
package {{.Package}} package {{.Package}}
import (
"math/big"
"strings"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
)
// Reference imports to suppress errors if they are not otherwise used.
var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
_ = abi.U256
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
_ = event.NewSubscription
)
{{range $contract := .Contracts}} {{range $contract := .Contracts}}
// {{.Type}}ABI is the input ABI used to generate the binding from. // {{.Type}}ABI is the input ABI used to generate the binding from.
const {{.Type}}ABI = "{{.InputABI}}" const {{.Type}}ABI = "{{.InputABI}}"


@ -23,8 +23,8 @@ environment:
install: install:
- git submodule update --init - git submodule update --init
- rmdir C:\go /s /q - rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.windows-%GETH_ARCH%.zip - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.1.windows-%GETH_ARCH%.zip
- 7z x go1.11.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - 7z x go1.11.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version - go version
- gcc --version - gcc --version


@ -320,9 +320,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd
// "tests" also includes static analysis tools such as vet. // "tests" also includes static analysis tools such as vet.
func doTest(cmdline []string) { func doTest(cmdline []string) {
var ( coverage := flag.Bool("coverage", false, "Whether to record code coverage")
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
)
flag.CommandLine.Parse(cmdline) flag.CommandLine.Parse(cmdline)
env := build.Env() env := build.Env()
@ -332,14 +330,11 @@ func doTest(cmdline []string) {
} }
packages = build.ExpandPackagesNoVendor(packages) packages = build.ExpandPackagesNoVendor(packages)
// Run analysis tools before the tests.
build.MustRun(goTool("vet", packages...))
// Run the actual tests. // Run the actual tests.
gotest := goTool("test", buildFlags(env)...)
// Test a single package at a time. CI builders are slow // Test a single package at a time. CI builders are slow
// and some tests run into timeouts under load. // and some tests run into timeouts under load.
gotest.Args = append(gotest.Args, "-p", "1") gotest := goTool("test", buildFlags(env)...)
gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m")
if *coverage { if *coverage {
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover") gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
} }
@ -1040,7 +1035,7 @@ func xgoTool(args []string) *exec.Cmd {
func doPurge(cmdline []string) { func doPurge(cmdline []string) {
var ( var (
store = flag.String("store", "", `Destination from where to purge archives (usually "gethstore/builds")`) store = flag.String("store", "", `Destination from where to purge archives (usually "gethstore/builds")`)
limit = flag.Int("days", 30, `Age threshold above which to delete unstalbe archives`) limit = flag.Int("days", 30, `Age threshold above which to delete unstable archives`)
) )
flag.CommandLine.Parse(cmdline) flag.CommandLine.Parse(cmdline)


@ -75,7 +75,7 @@ func main() {
bins []string bins []string
types []string types []string
) )
if *solFlag != "" || *abiFlag == "-" { if *solFlag != "" || (*abiFlag == "-" && *pkgFlag == "") {
// Generate the list of types to exclude from binding // Generate the list of types to exclude from binding
exclude := make(map[string]bool) exclude := make(map[string]bool)
for _, kind := range strings.Split(*excFlag, ",") { for _, kind := range strings.Split(*excFlag, ",") {
@ -111,7 +111,13 @@ func main() {
} }
} else { } else {
// Otherwise load up the ABI, optional bytecode and type name from the parameters // Otherwise load up the ABI, optional bytecode and type name from the parameters
abi, err := ioutil.ReadFile(*abiFlag) var abi []byte
var err error
if *abiFlag == "-" {
abi, err = ioutil.ReadAll(os.Stdin)
} else {
abi, err = ioutil.ReadFile(*abiFlag)
}
if err != nil { if err != nil {
fmt.Printf("Failed to read input ABI: %v\n", err) fmt.Printf("Failed to read input ABI: %v\n", err)
os.Exit(-1) os.Exit(-1)
@ -155,6 +161,5 @@ func contractsFromStdin() (map[string]*compiler.Contract, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return compiler.ParseCombinedJSON(bytes, "", "", "", "") return compiler.ParseCombinedJSON(bytes, "", "", "", "")
} }


@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
) )
@ -85,7 +86,7 @@ func main() {
} }
if *writeAddr { if *writeAddr {
fmt.Printf("%v\n", discover.PubkeyID(&nodeKey.PublicKey)) fmt.Printf("%v\n", enode.PubkeyToIDV4(&nodeKey.PublicKey))
os.Exit(0) os.Exit(0)
} }


@ -875,3 +875,4 @@ There are a couple of implementation for a UI. We'll try to keep this list up to
| QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)| | QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
| GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: | | GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
| Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: | | Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)|


@ -1,6 +1,13 @@
### Changelog for external API ### Changelog for external API
#### 4.0.0
* The external `account_Ecrecover`-method was removed.
* The external `account_Import`-method was removed.
#### 3.0.0
* The external `account_List`-method was changed to not expose `url`, which contained info about the local filesystem. It now returns only a list of addresses.
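As a rough sketch of what a caller sees after this change (assuming Clef's external HTTP endpoint at its default http://localhost:8550 and the `account_list` RPC method name), the reply can now be decoded straight into a slice of addresses:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumption: Clef is running with its external API exposed over HTTP.
	client, err := rpc.Dial("http://localhost:8550")
	if err != nil {
		panic(err)
	}
	// Since 3.0.0 the result is just a list of addresses, no keystore URLs.
	var accounts []common.Address
	if err := client.Call(&accounts, "account_list"); err != nil {
		panic(err)
	}
	fmt.Println(accounts)
}
```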
#### 2.0.0 #### 2.0.0


@ -1,5 +1,21 @@
### Changelog for internal API (ui-api) ### Changelog for internal API (ui-api)
### 2.1.0
* Add `OnInputRequired(info UserInputRequest)` to internal API. This method is used when Clef needs user input, e.g. passwords.
The following structures are used:
```golang
UserInputRequest struct {
Prompt string `json:"prompt"`
Title string `json:"title"`
IsPassword bool `json:"isPassword"`
}
UserInputResponse struct {
Text string `json:"text"`
}
```
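A minimal sketch of a UI-side handler for the new call, assuming the method returns a `UserInputResponse` plus an error (the return type is not spelled out in the changelog); the `headlessUI` type and the stdin prompt are illustrative only.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Structures as listed in the changelog entry above.
type UserInputRequest struct {
	Prompt     string `json:"prompt"`
	Title      string `json:"title"`
	IsPassword bool   `json:"isPassword"`
}

type UserInputResponse struct {
	Text string `json:"text"`
}

// headlessUI is a hypothetical UI implementation; only the OnInputRequired
// handler is sketched here.
type headlessUI struct{}

func (ui *headlessUI) OnInputRequired(info UserInputRequest) (UserInputResponse, error) {
	fmt.Printf("%s\n%s: ", info.Title, info.Prompt)
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return UserInputResponse{}, err
	}
	return UserInputResponse{Text: strings.TrimSpace(line)}, nil
}

func main() {
	resp, err := (&headlessUI{}).OnInputRequired(UserInputRequest{Title: "Password needed", Prompt: "Enter account password"})
	if err != nil {
		panic(err)
	}
	fmt.Println("got:", resp.Text)
}
```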
### 2.0.0 ### 2.0.0
* Modify how `call_info` on a transaction is conveyed. New format: * Modify how `call_info` on a transaction is conveyed. New format:


@ -48,7 +48,7 @@ import (
) )
// ExternalAPIVersion -- see extapi_changelog.md // ExternalAPIVersion -- see extapi_changelog.md
const ExternalAPIVersion = "2.0.0" const ExternalAPIVersion = "3.0.0"
// InternalAPIVersion -- see intapi_changelog.md // InternalAPIVersion -- see intapi_changelog.md
const InternalAPIVersion = "2.0.0" const InternalAPIVersion = "2.0.0"
@ -70,6 +70,10 @@ var (
Value: 4, Value: 4,
Usage: "log level to emit to the screen", Usage: "log level to emit to the screen",
} }
advancedMode = cli.BoolFlag{
Name: "advanced",
Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off",
}
keystoreFlag = cli.StringFlag{ keystoreFlag = cli.StringFlag{
Name: "keystore", Name: "keystore",
Value: filepath.Join(node.DefaultDataDir(), "keystore"), Value: filepath.Join(node.DefaultDataDir(), "keystore"),
@ -191,6 +195,7 @@ func init() {
ruleFlag, ruleFlag,
stdiouiFlag, stdiouiFlag,
testFlag, testFlag,
advancedMode,
} }
app.Action = signer app.Action = signer
app.Commands = []cli.Command{initCommand, attestCommand, addCredentialCommand} app.Commands = []cli.Command{initCommand, attestCommand, addCredentialCommand}
@ -384,7 +389,8 @@ func signer(c *cli.Context) error {
c.String(keystoreFlag.Name), c.String(keystoreFlag.Name),
c.Bool(utils.NoUSBFlag.Name), c.Bool(utils.NoUSBFlag.Name),
ui, db, ui, db,
c.Bool(utils.LightKDFFlag.Name)) c.Bool(utils.LightKDFFlag.Name),
c.Bool(advancedMode.Name))
api = apiImpl api = apiImpl


@ -31,43 +31,51 @@ NOTE: This file does not contain your accounts. Those need to be backed up separ
## Creating rules ## Creating rules
Now, you can create a rule-file. Now, you can create a rule-file. Note that it is not mandatory to use predefined rules, but it's really handy.
```javascript ```javascript
function ApproveListing(){ function ApproveListing(){
return "Approve" return "Approve"
} }
``` ```
Get the `sha256` hash....
Get the `sha256` hash. If you have openssl, you can do `openssl sha256 rules.js`...
```text ```text
#sha256sum rules.js #sha256sum rules.js
6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72 rules.js 6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72 rules.js
``` ```
...And then `attest` the file: ...now `attest` the file...
```text ```text
#./signer attest 6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72 #./signer attest 6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
INFO [02-21|12:14:38] Ruleset attestation updated sha256=6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72 INFO [02-21|12:14:38] Ruleset attestation updated sha256=6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
``` ```
At this point, we then start the signer with the rule-file:
...and (this is required only for non-production versions) load a mock-up `4byte.json` by copying the file from the source to your current working directory:
```text ```text
#./signer --rules rules.json #cp $GOPATH/src/github.com/ethereum/go-ethereum/cmd/clef/4byte.json $PWD
```
INFO [02-21|12:15:18] Using CLI as UI-channel At this point, we can start the signer with the rule-file:
INFO [02-21|12:15:18] Loaded 4byte db signatures=5509 file=./4byte.json ```text
INFO [02-21|12:15:18] Could not load rulefile, rules not enabled file=rulefile #./signer --rules rules.js --rpc
DEBUG[02-21|12:15:18] FS scan times list=35.335µs set=5.536µs diff=5.073µs
DEBUG[02-21|12:15:18] Ledger support enabled INFO [09-25|20:28:11.866] Using CLI as UI-channel
DEBUG[02-21|12:15:18] Trezor support enabled INFO [09-25|20:28:11.876] Loaded 4byte db signatures=5509 file=./4byte.json
INFO [02-21|12:15:18] Audit logs configured file=audit.log INFO [09-25|20:28:11.877] Rule engine configured file=./rules.js
INFO [02-21|12:15:18] HTTP endpoint opened url=http://localhost:8550 DEBUG[09-25|20:28:11.877] FS scan times list=100.781µs set=13.253µs diff=5.761µs
DEBUG[09-25|20:28:11.884] Ledger support enabled
DEBUG[09-25|20:28:11.888] Trezor support enabled
INFO [09-25|20:28:11.888] Audit logs configured file=audit.log
DEBUG[09-25|20:28:11.888] HTTP registered namespace=account
INFO [09-25|20:28:11.890] HTTP endpoint opened url=http://localhost:8550
DEBUG[09-25|20:28:11.890] IPC registered namespace=account
INFO [09-25|20:28:11.890] IPC endpoint opened url=<nil>
------- Signer info ------- ------- Signer info -------
* extapi_version : 2.0.0
* intapi_version : 2.0.0
* extapi_http : http://localhost:8550 * extapi_http : http://localhost:8550
* extapi_ipc : <nil> * extapi_ipc : <nil>
* extapi_version : 2.0.0
* intapi_version : 1.2.0
``` ```
Any list-requests will now be auto-approved by our rule-file. Any list-requests will now be auto-approved by our rule-file.
@ -107,16 +115,16 @@ The `master_seed` was then used to derive a few other things:
## Adding credentials ## Adding credentials
In order to make more useful rules; sign transactions, the signer needs access to the passwords needed to unlock keystores. In order to make more useful rules like signing transactions, the signer needs access to the passwords needed to unlock keystores.
```text ```text
#./signer addpw 0x694267f14675d7e1b9494fd8d72fefe1755710fa test #./signer addpw "0x694267f14675d7e1b9494fd8d72fefe1755710fa" "test_password"
INFO [02-21|13:43:21] Credential store updated key=0x694267f14675d7e1b9494fd8d72fefe1755710fa INFO [02-21|13:43:21] Credential store updated key=0x694267f14675d7e1b9494fd8d72fefe1755710fa
``` ```
## More advanced rules ## More advanced rules
Now let's update the rules to make use of credentials Now let's update the rules to make use of credentials:
```javascript ```javascript
function ApproveListing(){ function ApproveListing(){
@ -134,13 +142,15 @@ function ApproveSignData(r){
} }
``` ```
In this example, In this example:
* any requests to sign data with the account `0x694...` will be * Any requests to sign data with the account `0x694...` will be
* auto-approved if the message contains with `bazonk`, * auto-approved if the message contains with `bazonk`
* and auto-rejected if it does not. * auto-rejected if it does not.
* Any other signing-requests will be passed along for manual approve/reject. * Any other signing-requests will be passed along for manual approve/reject.
..attest the new file _Note: make sure that `0x694...` is an account you have access to. You can create it either via the clef or the traditional account cli tool. If the latter was chosen, make sure both clef and geth use the same keystore by specifing `--keystore path/to/your/keystore` when running clef._
Attest the new file...
```text ```text
#sha256sum rules.js #sha256sum rules.js
2a0cb661dacfc804b6e95d935d813fd17c0997a7170e4092ffbc34ca976acd9f rules.js 2a0cb661dacfc804b6e95d935d813fd17c0997a7170e4092ffbc34ca976acd9f rules.js
@ -153,23 +163,26 @@ INFO [02-21|14:36:30] Ruleset attestation updated sha256=2a0cb661da
And start the signer: And start the signer:
``` ```
#./signer --rules rules.js #./signer --rules rules.js --rpc
INFO [02-21|14:41:56] Using CLI as UI-channel INFO [09-25|21:02:16.450] Using CLI as UI-channel
INFO [02-21|14:41:56] Loaded 4byte db signatures=5509 file=./4byte.json INFO [09-25|21:02:16.466] Loaded 4byte db signatures=5509 file=./4byte.json
INFO [02-21|14:41:56] Rule engine configured file=rules.js INFO [09-25|21:02:16.467] Rule engine configured file=./rules.js
DEBUG[02-21|14:41:56] FS scan times list=34.607µs set=4.509µs diff=4.87µs DEBUG[09-25|21:02:16.468] FS scan times list=1.45262ms set=21.926µs diff=6.944µs
DEBUG[02-21|14:41:56] Ledger support enabled DEBUG[09-25|21:02:16.473] Ledger support enabled
DEBUG[02-21|14:41:56] Trezor support enabled DEBUG[09-25|21:02:16.475] Trezor support enabled
INFO [02-21|14:41:56] Audit logs configured file=audit.log INFO [09-25|21:02:16.476] Audit logs configured file=audit.log
INFO [02-21|14:41:56] HTTP endpoint opened url=http://localhost:8550 DEBUG[09-25|21:02:16.476] HTTP registered namespace=account
INFO [09-25|21:02:16.478] HTTP endpoint opened url=http://localhost:8550
DEBUG[09-25|21:02:16.478] IPC registered namespace=account
INFO [09-25|21:02:16.478] IPC endpoint opened url=<nil>
------- Signer info ------- ------- Signer info -------
* extapi_version : 2.0.0 * extapi_version : 2.0.0
* intapi_version : 1.2.0 * intapi_version : 2.0.0
* extapi_http : http://localhost:8550 * extapi_http : http://localhost:8550
* extapi_ipc : <nil> * extapi_ipc : <nil>
INFO [02-21|14:41:56] error occurred during execution error="ReferenceError: 'OnSignerStartup' is not defined"
``` ```
And then test signing, once with `bazonk` and once without: And then test signing, once with `bazonk` and once without:
``` ```
@ -190,9 +203,9 @@ INFO [02-21|14:42:56] Op rejected
The signer also stores all traffic over the external API in a log file. The last 4 lines shows the two requests and their responses: The signer also stores all traffic over the external API in a log file. The last 4 lines shows the two requests and their responses:
```text ```text
#tail audit.log -n 4 #tail -n 4 audit.log
t=2018-02-21T14:42:41+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49706\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=202062617a6f6e6b2062617a2067617a0a t=2018-02-21T14:42:41+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49706\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=202062617a6f6e6b2062617a2067617a0a
t=2018-02-21T14:42:42+0100 lvl=info msg=Sign api=signer type=response data=93e6161840c3ae1efc26dc68dedab6e8fc233bb3fefa1b4645dbf6609b93dace160572ea4ab33240256bb6d3dadb60dcd9c515d6374d3cf614ee897408d41d541c error=nil t=2018-02-21T14:42:42+0100 lvl=info msg=Sign api=signer type=response data=93e6161840c3ae1efc26dc68dedab6e8fc233bb3fefa1b4645dbf6609b93dace160572ea4ab33240256bb6d3dadb60dcd9c515d6374d3cf614ee897408d41d541c error=nil
t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49708\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=2020626f6e6b2062617a2067617a0a t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49708\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=2020626f6e6b2062617a2067617a0a
t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=response data= error="Request denied" t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=response data= error="Request denied"
``` ```


@ -97,6 +97,10 @@ func stateTestCmd(ctx *cli.Context) error {
// Run the test and aggregate the result // Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
state, err := test.Run(st, cfg) state, err := test.Run(st, cfg)
// print state root for evmlab tracing
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
}
if err != nil { if err != nil {
// Test failed, mark as so and dump any state to aid debugging // Test failed, mark as so and dump any state to aid debugging
result.Pass, result.Error = false, err.Error() result.Pass, result.Error = false, err.Error()
@ -105,10 +109,6 @@ func stateTestCmd(ctx *cli.Context) error {
result.State = &dump result.State = &dump
} }
} }
// print state root for evmlab tracing (already committed above, so no need to delete objects again
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
}
results = append(results, *result) results = append(results, *result)


@ -54,8 +54,8 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"golang.org/x/net/websocket" "golang.org/x/net/websocket"
@ -255,8 +255,10 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
return nil, err return nil, err
} }
for _, boot := range enodes { for _, boot := range enodes {
old, _ := discover.ParseNode(boot.String()) old, err := enode.ParseV4(boot.String())
stack.Server().AddPeer(old) if err != nil {
stack.Server().AddPeer(old)
}
} }
// Attach to the client and retrieve and interesting metadatas // Attach to the client and retrieve and interesting metadatas
api, err := stack.Attach() api, err := stack.Attach()


@ -47,7 +47,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations" "github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
@ -285,7 +285,7 @@ func createNode(ctx *cli.Context) error {
if err != nil { if err != nil {
return err return err
} }
config.ID = discover.PubkeyID(&privKey.PublicKey) config.ID = enode.PubkeyToIDV4(&privKey.PublicKey)
config.PrivateKey = privKey config.PrivateKey = privKey
} }
if services := ctx.String("services"); services != "" { if services := ctx.String("services"); services != "" {


@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
RUN \ RUN \
echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}} echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}} echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gaslimit {{.GasLimit}} --miner.gasprice {{.GasPrice}}' >> geth.sh echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --nat extip:{{.IP}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gaslimit {{.GasLimit}} --miner.gasprice {{.GasPrice}}' >> geth.sh
ENTRYPOINT ["/bin/sh", "geth.sh"] ENTRYPOINT ["/bin/sh", "geth.sh"]
` `
@ -99,6 +99,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
template.Must(template.New("").Parse(nodeDockerfile)).Execute(dockerfile, map[string]interface{}{ template.Must(template.New("").Parse(nodeDockerfile)).Execute(dockerfile, map[string]interface{}{
"NetworkID": config.network, "NetworkID": config.network,
"Port": config.port, "Port": config.port,
"IP": client.address,
"Peers": config.peersTotal, "Peers": config.peersTotal,
"LightFlag": lightFlag, "LightFlag": lightFlag,
"Bootnodes": strings.Join(bootnodes, ","), "Bootnodes": strings.Join(bootnodes, ","),
@ -227,10 +228,10 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
// Container available, retrieve its node ID and its genesis json // Container available, retrieve its node ID and its genesis json
var out []byte var out []byte
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id --cache=16 attach", network, kind)); err != nil { if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.enode --cache=16 attach", network, kind)); err != nil {
return nil, ErrServiceUnreachable return nil, ErrServiceUnreachable
} }
id := bytes.Trim(bytes.TrimSpace(out), "\"") enode := bytes.Trim(bytes.TrimSpace(out), "\"")
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 cat /genesis.json", network, kind)); err != nil { if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 cat /genesis.json", network, kind)); err != nil {
return nil, ErrServiceUnreachable return nil, ErrServiceUnreachable
@ -265,7 +266,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
gasLimit: gasLimit, gasLimit: gasLimit,
gasPrice: gasPrice, gasPrice: gasPrice,
} }
stats.enode = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.port) stats.enode = string(enode)
return stats, nil return stats, nil
} }


@ -130,7 +130,7 @@ func accessNewACT(ctx *cli.Context) {
if err != nil { if err != nil {
utils.Fatalf("had an error reading the grantee public key list") utils.Fatalf("had an error reading the grantee public key list")
} }
pkGrantees = strings.Split(string(bytes), "\n") pkGrantees = strings.Split(strings.Trim(string(bytes), "\n"), "\n")
} }
if passGranteesFilename != "" { if passGranteesFilename != "" {
@ -138,7 +138,7 @@ func accessNewACT(ctx *cli.Context) {
if err != nil { if err != nil {
utils.Fatalf("could not read password filename: %v", err) utils.Fatalf("could not read password filename: %v", err)
} }
passGrantees = strings.Split(string(bytes), "\n") passGrantees = strings.Split(strings.Trim(string(bytes), "\n"), "\n")
} }
accessKey, ae, actManifest, err = api.DoACT(ctx, privateKey, salt, pkGrantees, passGrantees) accessKey, ae, actManifest, err = api.DoACT(ctx, privateKey, salt, pkGrantees, passGrantees)
if err != nil { if err != nil {


@ -59,27 +59,28 @@ var (
//constants for environment variables //constants for environment variables
const ( const (
SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR" SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT" SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR" SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
SWARM_ENV_PORT = "SWARM_PORT" SWARM_ENV_PORT = "SWARM_PORT"
SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID" SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE" SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
SWARM_ENV_SWAP_API = "SWARM_SWAP_API" SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE" SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY" SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE" SWARM_ENV_MAX_STREAM_PEER_SERVERS = "SWARM_ENV_MAX_STREAM_PEER_SERVERS"
SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK" SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE"
SWARM_ENV_ENS_API = "SWARM_ENS_API" SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR" SWARM_ENV_ENS_API = "SWARM_ENS_API"
SWARM_ENV_CORS = "SWARM_CORS" SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES" SWARM_ENV_CORS = "SWARM_CORS"
SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE" SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH" SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE"
SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY" SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY" SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD" SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
GETH_ENV_DATADIR = "GETH_DATADIR" SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD"
GETH_ENV_DATADIR = "GETH_DATADIR"
) )
// These settings ensure that TOML keys use the same names as Go struct fields. // These settings ensure that TOML keys use the same names as Go struct fields.
@ -124,7 +125,7 @@ func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) {
//get the account for the provided swarm account //get the account for the provided swarm account
prvkey := getAccount(config.BzzAccount, ctx, stack) prvkey := getAccount(config.BzzAccount, ctx, stack)
//set the resolved config path (geth --datadir) //set the resolved config path (geth --datadir)
config.Path = stack.InstanceDir() config.Path = expandPath(stack.InstanceDir())
//finally, initialize the configuration //finally, initialize the configuration
config.Init(prvkey) config.Init(prvkey)
//configuration phase completed here //configuration phase completed here
@ -175,14 +176,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
} }
if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" { if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
if id, _ := strconv.Atoi(networkid); id != 0 { id, err := strconv.ParseUint(networkid, 10, 64)
currentConfig.NetworkID = uint64(id) if err != nil {
utils.Fatalf("invalid cli flag %s: %v", SwarmNetworkIdFlag.Name, err)
}
if id != 0 {
currentConfig.NetworkID = id
} }
} }
if ctx.GlobalIsSet(utils.DataDirFlag.Name) { if ctx.GlobalIsSet(utils.DataDirFlag.Name) {
if datadir := ctx.GlobalString(utils.DataDirFlag.Name); datadir != "" { if datadir := ctx.GlobalString(utils.DataDirFlag.Name); datadir != "" {
currentConfig.Path = datadir currentConfig.Path = expandPath(datadir)
} }
} }
@ -207,6 +212,9 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.SyncUpdateDelay = d currentConfig.SyncUpdateDelay = d
} }
// any value including 0 is acceptable
currentConfig.MaxStreamPeerServers = ctx.GlobalInt(SwarmMaxStreamPeerServersFlag.Name)
if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) { if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) {
currentConfig.LightNodeEnabled = true currentConfig.LightNodeEnabled = true
} }
@ -226,6 +234,10 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
if len(ensAPIs) == 1 && ensAPIs[0] == "" { if len(ensAPIs) == 1 && ensAPIs[0] == "" {
ensAPIs = nil ensAPIs = nil
} }
for i := range ensAPIs {
ensAPIs[i] = expandPath(ensAPIs[i])
}
currentConfig.EnsAPIs = ensAPIs currentConfig.EnsAPIs = ensAPIs
} }
@ -262,13 +274,17 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
} }
if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" { if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
if id, _ := strconv.Atoi(networkid); id != 0 { id, err := strconv.ParseUint(networkid, 10, 64)
currentConfig.NetworkID = uint64(id) if err != nil {
utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_NETWORK_ID, err)
}
if id != 0 {
currentConfig.NetworkID = id
} }
} }
if datadir := os.Getenv(GETH_ENV_DATADIR); datadir != "" { if datadir := os.Getenv(GETH_ENV_DATADIR); datadir != "" {
currentConfig.Path = datadir currentConfig.Path = expandPath(datadir)
} }
bzzport := os.Getenv(SWARM_ENV_PORT) bzzport := os.Getenv(SWARM_ENV_PORT)
@ -281,33 +297,50 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
} }
if swapenable := os.Getenv(SWARM_ENV_SWAP_ENABLE); swapenable != "" { if swapenable := os.Getenv(SWARM_ENV_SWAP_ENABLE); swapenable != "" {
if swap, err := strconv.ParseBool(swapenable); err != nil { swap, err := strconv.ParseBool(swapenable)
currentConfig.SwapEnabled = swap if err != nil {
utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_SWAP_ENABLE, err)
} }
currentConfig.SwapEnabled = swap
} }
if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" { if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
if sync, err := strconv.ParseBool(syncdisable); err != nil { sync, err := strconv.ParseBool(syncdisable)
currentConfig.SyncEnabled = !sync if err != nil {
utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_SYNC_DISABLE, err)
} }
currentConfig.SyncEnabled = !sync
} }
if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" { if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
if skipCheck, err := strconv.ParseBool(v); err != nil { skipCheck, err := strconv.ParseBool(v)
if err != nil {
currentConfig.DeliverySkipCheck = skipCheck currentConfig.DeliverySkipCheck = skipCheck
} }
} }
if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" { if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
if d, err := time.ParseDuration(v); err != nil { d, err := time.ParseDuration(v)
currentConfig.SyncUpdateDelay = d if err != nil {
utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_SYNC_UPDATE_DELAY, err)
} }
currentConfig.SyncUpdateDelay = d
}
if max := os.Getenv(SWARM_ENV_MAX_STREAM_PEER_SERVERS); max != "" {
m, err := strconv.Atoi(max)
if err != nil {
utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_MAX_STREAM_PEER_SERVERS, err)
}
currentConfig.MaxStreamPeerServers = m
} }
if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" { if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" {
if lightnode, err := strconv.ParseBool(lne); err != nil { lightnode, err := strconv.ParseBool(lne)
currentConfig.LightNodeEnabled = lightnode if err != nil {
utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_LIGHT_NODE_ENABLE, err)
} }
currentConfig.LightNodeEnabled = lightnode
} }
if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" { if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {


@ -93,21 +93,6 @@ func dbImport(ctx *cli.Context) {
log.Info(fmt.Sprintf("successfully imported %d chunks", count)) log.Info(fmt.Sprintf("successfully imported %d chunks", count))
} }
func dbClean(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 2 {
utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
}
store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
defer store.Close()
store.Cleanup()
}
func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) { func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return nil, fmt.Errorf("invalid chunkdb path: %s", err) return nil, fmt.Errorf("invalid chunkdb path: %s", err)


@ -0,0 +1,172 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// Command feed allows the user to create and update signed Swarm feeds
package main
import (
"fmt"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/cmd/utils"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"gopkg.in/urfave/cli.v1"
)
func NewGenericSigner(ctx *cli.Context) feed.Signer {
return feed.NewGenericSigner(getPrivKey(ctx))
}
func getTopic(ctx *cli.Context) (topic feed.Topic) {
var name = ctx.String(SwarmFeedNameFlag.Name)
var relatedTopic = ctx.String(SwarmFeedTopicFlag.Name)
var relatedTopicBytes []byte
var err error
if relatedTopic != "" {
relatedTopicBytes, err = hexutil.Decode(relatedTopic)
if err != nil {
utils.Fatalf("Error parsing topic: %s", err)
}
}
topic, err = feed.NewTopic(name, relatedTopicBytes)
if err != nil {
utils.Fatalf("Error parsing topic: %s", err)
}
return topic
}
// swarm feed create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]]
// swarm feed update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false]
// swarm feed info <Manifest Address or ENS domain>
func feedCreateManifest(ctx *cli.Context) {
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
)
newFeedUpdateRequest := feed.NewFirstRequest(getTopic(ctx))
newFeedUpdateRequest.Feed.User = feedGetUser(ctx)
manifestAddress, err := client.CreateFeedWithManifest(newFeedUpdateRequest)
if err != nil {
utils.Fatalf("Error creating feed manifest: %s", err.Error())
return
}
fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up)
}
func feedUpdate(ctx *cli.Context) {
args := ctx.Args()
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
manifestAddressOrDomain = ctx.String(SwarmFeedManifestFlag.Name)
)
if len(args) < 1 {
fmt.Println("Incorrect number of arguments")
cli.ShowCommandHelpAndExit(ctx, "update", 1)
return
}
signer := NewGenericSigner(ctx)
data, err := hexutil.Decode(args[0])
if err != nil {
utils.Fatalf("Error parsing data: %s", err.Error())
return
}
var updateRequest *feed.Request
var query *feed.Query
if manifestAddressOrDomain == "" {
query = new(feed.Query)
query.User = signer.Address()
query.Topic = getTopic(ctx)
}
// Retrieve a feed update request
updateRequest, err = client.GetFeedRequest(query, manifestAddressOrDomain)
if err != nil {
utils.Fatalf("Error retrieving feed status: %s", err.Error())
}
// set the new data
updateRequest.SetData(data)
// sign update
if err = updateRequest.Sign(signer); err != nil {
utils.Fatalf("Error signing feed update: %s", err.Error())
}
// post update
err = client.UpdateFeed(updateRequest)
if err != nil {
utils.Fatalf("Error updating feed: %s", err.Error())
return
}
}
func feedInfo(ctx *cli.Context) {
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
manifestAddressOrDomain = ctx.String(SwarmFeedManifestFlag.Name)
)
var query *feed.Query
if manifestAddressOrDomain == "" {
query = new(feed.Query)
query.Topic = getTopic(ctx)
query.User = feedGetUser(ctx)
}
metadata, err := client.GetFeedRequest(query, manifestAddressOrDomain)
if err != nil {
utils.Fatalf("Error retrieving feed metadata: %s", err.Error())
return
}
encodedMetadata, err := metadata.MarshalJSON()
if err != nil {
utils.Fatalf("Error encoding metadata to JSON for display:%s", err)
}
fmt.Println(string(encodedMetadata))
}
func feedGetUser(ctx *cli.Context) common.Address {
var user = ctx.String(SwarmFeedUserFlag.Name)
if user != "" {
return common.HexToAddress(user)
}
pk := getPrivKey(ctx)
if pk == nil {
utils.Fatalf("Cannot read private key. Must specify --user or --bzzaccount")
}
return crypto.PubkeyToAddress(pk.PublicKey)
}

View File

@ -38,7 +38,7 @@ import (
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/swarm" "github.com/ethereum/go-ethereum/swarm"
bzzapi "github.com/ethereum/go-ethereum/swarm/api" bzzapi "github.com/ethereum/go-ethereum/swarm/api"
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics" swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
@ -116,6 +116,12 @@ var (
Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)", Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY, EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
} }
SwarmMaxStreamPeerServersFlag = cli.IntFlag{
Name: "max-stream-peer-servers",
Usage: "Limit of Stream peer servers, 0 denotes unlimited",
EnvVar: SWARM_ENV_MAX_STREAM_PEER_SERVERS,
Value: 10000, // A very large default value is possible as stream servers have very small memory footprint
}
SwarmLightNodeEnabled = cli.BoolFlag{ SwarmLightNodeEnabled = cli.BoolFlag{
Name: "lightnode", Name: "lightnode",
Usage: "Enable Swarm LightNode (default false)", Usage: "Enable Swarm LightNode (default false)",
@ -197,22 +203,30 @@ var (
Usage: "Number of recent chunks cached in memory (default 5000)", Usage: "Number of recent chunks cached in memory (default 5000)",
EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY, EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
} }
SwarmResourceMultihashFlag = cli.BoolFlag{
Name: "multihash",
Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource",
}
SwarmResourceNameFlag = cli.StringFlag{
Name: "name",
Usage: "User-defined name for the new resource",
}
SwarmResourceDataOnCreateFlag = cli.StringFlag{
Name: "data",
Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x",
}
SwarmCompressedFlag = cli.BoolFlag{ SwarmCompressedFlag = cli.BoolFlag{
Name: "compressed", Name: "compressed",
Usage: "Prints encryption keys in compressed form", Usage: "Prints encryption keys in compressed form",
} }
SwarmFeedNameFlag = cli.StringFlag{
Name: "name",
Usage: "User-defined name for the new feed, limited to 32 characters. If combined with topic, it will refer to a subtopic with this name",
}
SwarmFeedTopicFlag = cli.StringFlag{
Name: "topic",
Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters",
}
SwarmFeedDataOnCreateFlag = cli.StringFlag{
Name: "data",
Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x",
}
SwarmFeedManifestFlag = cli.StringFlag{
Name: "manifest",
Usage: "Refers to the feed through a manifest",
}
SwarmFeedUserFlag = cli.StringFlag{
Name: "user",
Usage: "Indicates the user who updates the feed",
}
) )
//declare a few constant error messages, useful for later error check comparisons in test //declare a few constant error messages, useful for later error check comparisons in test
@ -242,12 +256,12 @@ func init() {
utils.ListenPortFlag.Value = 30399 utils.ListenPortFlag.Value = 30399
} }
var app = utils.NewApp(gitCommit, "Ethereum Swarm") var app = utils.NewApp("", "Ethereum Swarm")
// This init function creates the cli.App. // This init function creates the cli.App.
func init() { func init() {
app.Action = bzzd app.Action = bzzd
app.HideVersion = true // we have a command to print the version app.Version = sv.ArchiveVersion(gitCommit)
app.Copyright = "Copyright 2013-2016 The go-ethereum Authors" app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
app.Commands = []cli.Command{ app.Commands = []cli.Command{
{ {
@ -332,36 +346,62 @@ func init() {
}, },
{ {
CustomHelpTemplate: helpTemplate, CustomHelpTemplate: helpTemplate,
Name: "resource", Name: "feed",
Usage: "(Advanced) Create and update Mutable Resources", Usage: "(Advanced) Create and update Swarm Feeds",
ArgsUsage: "<create|update|info>", ArgsUsage: "<create|update|info>",
Description: "Works with Mutable Resource Updates", Description: "Works with Swarm Feeds",
Subcommands: []cli.Command{ Subcommands: []cli.Command{
{ {
Action: resourceCreate, Action: feedCreateManifest,
CustomHelpTemplate: helpTemplate, CustomHelpTemplate: helpTemplate,
Name: "create", Name: "create",
Usage: "creates a new Mutable Resource", Usage: "creates and publishes a new feed manifest",
ArgsUsage: "<frequency>", Description: `creates and publishes a new feed manifest pointing to a specified user's updates about a particular topic.
Description: "creates a new Mutable Resource", The feed topic can be built in the following ways:
Flags: []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag}, * use --topic to set the topic to an arbitrary binary hex string.
* use --name to set the topic to a human-readable name.
For example --name could be set to "profile-picture", meaning this feed allows retrieving this user's current profile picture.
* use both --topic and --name to create named subtopics.
For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
this feed tracks a discussion about that contract.
The --user flag allows this manifest to refer to a user other than yourself. If not specified,
it will default to your local account (--bzzaccount)`,
Flags: []cli.Flag{SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag},
}, },
{ {
Action: resourceUpdate, Action: feedUpdate,
CustomHelpTemplate: helpTemplate, CustomHelpTemplate: helpTemplate,
Name: "update", Name: "update",
Usage: "updates the content of an existing Mutable Resource", Usage: "updates the content of an existing Swarm Feed",
ArgsUsage: "<Manifest Address or ENS domain> <0x Hex data>", ArgsUsage: "<0x Hex data>",
Description: "updates the content of an existing Mutable Resource", Description: `publishes a new update on the specified topic
Flags: []cli.Flag{SwarmResourceMultihashFlag}, The feed topic can be built in the following ways:
* use --topic to set the topic to an arbitrary binary hex string.
* use --name to set the topic to a human-readable name.
For example --name could be set to "profile-picture", meaning this feed allows retrieving this user's current profile picture.
* use both --topic and --name to create named subtopics.
For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
this feed tracks a discussion about that contract.
If you have a manifest, you can specify it with --manifest to refer to the feed,
instead of using --topic / --name
`,
Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag},
}, },
{ {
Action: resourceInfo, Action: feedInfo,
CustomHelpTemplate: helpTemplate, CustomHelpTemplate: helpTemplate,
Name: "info", Name: "info",
Usage: "obtains information about an existing Mutable Resource", Usage: "obtains information about an existing Swarm feed",
ArgsUsage: "<Manifest Address or ENS domain>", Description: `obtains information about an existing Swarm feed
Description: "obtains information about an existing Mutable Resource", The topic can be specified directly with the --topic flag as an hex string
If no topic is specified, the default topic (zero) will be used
The --name flag can be used to specify subtopics with a specific name.
The --user flag allows referring to a user other than yourself. If not specified,
it will default to your local account (--bzzaccount)
If you have a manifest, you can specify it with --manifest instead of --topic / --name / --user
to refer to the feed`,
Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag},
}, },
}, },
}, },
@ -497,14 +537,6 @@ pv(1) tool to get a progress bar:
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`, pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
}, },
{
Action: dbClean,
CustomHelpTemplate: helpTemplate,
Name: "clean",
Usage: "remove corrupt entries from a local chunk database",
ArgsUsage: "<chunkdb>",
Description: "Remove corrupt entries from a local chunk database",
},
}, },
}, },
@ -542,6 +574,7 @@ pv(1) tool to get a progress bar:
SwarmSwapAPIFlag, SwarmSwapAPIFlag,
SwarmSyncDisabledFlag, SwarmSyncDisabledFlag,
SwarmSyncUpdateDelay, SwarmSyncUpdateDelay,
SwarmMaxStreamPeerServersFlag,
SwarmLightNodeEnabled, SwarmLightNodeEnabled,
SwarmDeliverySkipCheckFlag, SwarmDeliverySkipCheckFlag,
SwarmListenAddrFlag, SwarmListenAddrFlag,
@ -697,7 +730,7 @@ func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.Pr
} }
// getPrivKey returns the private key of the specified bzzaccount // getPrivKey returns the private key of the specified bzzaccount
// Used only by client commands, such as `resource` // Used only by client commands, such as `feed`
func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey { func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey {
// booting up the swarm node just as we do in bzzd action // booting up the swarm node just as we do in bzzd action
bzzconfig, err := buildConfig(ctx) bzzconfig, err := buildConfig(ctx)
@ -788,10 +821,10 @@ func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) {
return return
} }
cfg.P2P.BootstrapNodes = []*discover.Node{} cfg.P2P.BootstrapNodes = []*enode.Node{}
for _, url := range SwarmBootnodes { for _, url := range SwarmBootnodes {
node, err := discover.ParseNode(url) node, err := enode.ParseV4(url)
if err != nil { if err != nil {
log.Error("Bootstrap URL invalid", "enode", url, "err", err) log.Error("Bootstrap URL invalid", "enode", url, "err", err)
} }

View File

@ -0,0 +1,124 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
// Standard "mime" package rely on system-settings, see mime.osInitMime
// Swarm will run on many OS/Platform/Docker and must behave similar
// This command generates code to add common mime types based on mime.types file
//
// mime.types file provided by mailcap, which follow https://www.iana.org/assignments/media-types/media-types.xhtml
//
// Get last version of mime.types file by:
// docker run --rm -v $(pwd):/tmp alpine:edge /bin/sh -c "apk add -U mailcap; mv /etc/mime.types /tmp"
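// For illustration (hypothetical input), a mime.types line such as
//   text/html	html htm
// produces the generated entries ".html": "text/html" and ".htm": "text/html",
// which the generated init() then registers via mime.AddExtensionType.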
import (
"bufio"
"bytes"
"flag"
"html/template"
"io/ioutil"
"strings"
"log"
)
var (
typesFlag = flag.String("types", "", "Input mime.types file")
packageFlag = flag.String("package", "", "Golang package in output file")
outFlag = flag.String("out", "", "Output file name for the generated mime types")
)
type mime struct {
Name string
Exts []string
}
type templateParams struct {
PackageName string
Mimes []mime
}
func main() {
// Parse and ensure all needed inputs are specified
flag.Parse()
if *typesFlag == "" {
log.Fatalf("--types is required")
}
if *packageFlag == "" {
log.Fatalf("--types is required")
}
if *outFlag == "" {
log.Fatalf("--out is required")
}
params := templateParams{
PackageName: *packageFlag,
}
types, err := ioutil.ReadFile(*typesFlag)
if err != nil {
log.Fatal(err)
}
scanner := bufio.NewScanner(bytes.NewReader(types))
for scanner.Scan() {
txt := scanner.Text()
if strings.HasPrefix(txt, "#") || len(txt) == 0 {
continue
}
parts := strings.Fields(txt)
if len(parts) == 1 {
continue
}
params.Mimes = append(params.Mimes, mime{parts[0], parts[1:]})
}
if err = scanner.Err(); err != nil {
log.Fatal(err)
}
result := bytes.NewBuffer([]byte{})
if err := template.Must(template.New("_").Parse(tpl)).Execute(result, params); err != nil {
log.Fatal(err)
}
if err := ioutil.WriteFile(*outFlag, result.Bytes(), 0600); err != nil {
log.Fatal(err)
}
}
var tpl = `// Code generated by github.com/ethereum/go-ethereum/cmd/swarm/mimegen. DO NOT EDIT.
package {{ .PackageName }}
import "mime"
func init() {
var mimeTypes = map[string]string{
{{- range .Mimes -}}
{{ $name := .Name -}}
{{- range .Exts }}
".{{ . }}": "{{ $name | html }}",
{{- end }}
{{- end }}
}
for ext, name := range mimeTypes {
if err := mime.AddExtensionType(ext, name); err != nil {
panic(err)
}
}
}
`

File diff suppressed because it is too large

View File

@ -1,169 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// Command resource allows the user to create and update signed mutable resource updates
package main
import (
"fmt"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/cmd/utils"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/storage/mru"
"gopkg.in/urfave/cli.v1"
)
func NewGenericSigner(ctx *cli.Context) mru.Signer {
return mru.NewGenericSigner(getPrivKey(ctx))
}
// swarm resource create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]]
// swarm resource update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false]
// swarm resource info <Manifest Address or ENS domain>
func resourceCreate(ctx *cli.Context) {
args := ctx.Args()
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
multihash = ctx.Bool(SwarmResourceMultihashFlag.Name)
initialData = ctx.String(SwarmResourceDataOnCreateFlag.Name)
name = ctx.String(SwarmResourceNameFlag.Name)
)
if len(args) < 1 {
fmt.Println("Incorrect number of arguments")
cli.ShowCommandHelpAndExit(ctx, "create", 1)
return
}
signer := NewGenericSigner(ctx)
frequency, err := strconv.ParseUint(args[0], 10, 64)
if err != nil {
fmt.Printf("Frequency formatting error: %s\n", err.Error())
cli.ShowCommandHelpAndExit(ctx, "create", 1)
return
}
metadata := mru.ResourceMetadata{
Name: name,
Frequency: frequency,
Owner: signer.Address(),
}
var newResourceRequest *mru.Request
if initialData != "" {
initialDataBytes, err := hexutil.Decode(initialData)
if err != nil {
fmt.Printf("Error parsing data: %s\n", err.Error())
cli.ShowCommandHelpAndExit(ctx, "create", 1)
return
}
newResourceRequest, err = mru.NewCreateUpdateRequest(&metadata)
if err != nil {
utils.Fatalf("Error creating new resource request: %s", err)
}
newResourceRequest.SetData(initialDataBytes, multihash)
if err = newResourceRequest.Sign(signer); err != nil {
utils.Fatalf("Error signing resource update: %s", err.Error())
}
} else {
newResourceRequest, err = mru.NewCreateRequest(&metadata)
if err != nil {
utils.Fatalf("Error creating new resource request: %s", err)
}
}
manifestAddress, err := client.CreateResource(newResourceRequest)
if err != nil {
utils.Fatalf("Error creating resource: %s", err.Error())
return
}
fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up)
}
func resourceUpdate(ctx *cli.Context) {
args := ctx.Args()
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
multihash = ctx.Bool(SwarmResourceMultihashFlag.Name)
)
if len(args) < 2 {
fmt.Println("Incorrect number of arguments")
cli.ShowCommandHelpAndExit(ctx, "update", 1)
return
}
signer := NewGenericSigner(ctx)
manifestAddressOrDomain := args[0]
data, err := hexutil.Decode(args[1])
if err != nil {
utils.Fatalf("Error parsing data: %s", err.Error())
return
}
// Retrieve resource status and metadata out of the manifest
updateRequest, err := client.GetResourceMetadata(manifestAddressOrDomain)
if err != nil {
utils.Fatalf("Error retrieving resource status: %s", err.Error())
}
// set the new data
updateRequest.SetData(data, multihash)
// sign update
if err = updateRequest.Sign(signer); err != nil {
utils.Fatalf("Error signing resource update: %s", err.Error())
}
// post update
err = client.UpdateResource(updateRequest)
if err != nil {
utils.Fatalf("Error updating resource: %s", err.Error())
return
}
}
func resourceInfo(ctx *cli.Context) {
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
)
args := ctx.Args()
if len(args) < 1 {
fmt.Println("Incorrect number of arguments.")
cli.ShowCommandHelpAndExit(ctx, "info", 1)
return
}
manifestAddressOrDomain := args[0]
metadata, err := client.GetResourceMetadata(manifestAddressOrDomain)
if err != nil {
utils.Fatalf("Error retrieving resource metadata: %s", err.Error())
return
}
encodedMetadata, err := metadata.MarshalJSON()
if err != nil {
utils.Fatalf("Error encoding metadata to JSON for display:%s", err)
}
fmt.Println(string(encodedMetadata))
}

View File

@ -22,16 +22,15 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"mime"
"net/http"
"os" "os"
"os/user" "os/user"
"path" "path"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/ethereum/go-ethereum/cmd/utils"
swarm "github.com/ethereum/go-ethereum/swarm/api/client" swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/cmd/utils"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )
@ -118,10 +117,9 @@ func upload(ctx *cli.Context) {
return "", fmt.Errorf("error opening file: %s", err) return "", fmt.Errorf("error opening file: %s", err)
} }
defer f.Close() defer f.Close()
if mimeType == "" { if mimeType != "" {
mimeType = detectMimeType(file) f.ContentType = mimeType
} }
f.ContentType = mimeType
return client.Upload(f, "", toEncrypt) return client.Upload(f, "", toEncrypt)
} }
} }
@ -138,6 +136,12 @@ func upload(ctx *cli.Context) {
// 3. cleans the path, e.g. /a/b/../c -> /a/c // 3. cleans the path, e.g. /a/b/../c -> /a/c
// Note, it has limitations, e.g. ~someuser/tmp will not be expanded // Note, it has limitations, e.g. ~someuser/tmp will not be expanded
func expandPath(p string) string { func expandPath(p string) string {
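// Strings containing ':' or '@' (for example URLs or user@host style references) are returned unchanged.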
if i := strings.Index(p, ":"); i > 0 {
return p
}
if i := strings.Index(p, "@"); i > 0 {
return p
}
if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") { if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
if home := homeDir(); home != "" { if home := homeDir(); home != "" {
p = home + p[1:] p = home + p[1:]
@ -155,19 +159,3 @@ func homeDir() string {
} }
return "" return ""
} }
func detectMimeType(file string) string {
if ext := filepath.Ext(file); ext != "" {
return mime.TypeByExtension(ext)
}
f, err := os.Open(file)
if err != nil {
return ""
}
defer f.Close()
buf := make([]byte, 512)
if n, _ := f.Read(buf); n > 0 {
return http.DetectContentType(buf)
}
return ""
}

View File

@ -52,8 +52,8 @@ import (
"github.com/ethereum/go-ethereum/metrics/influxdb" "github.com/ethereum/go-ethereum/metrics/influxdb"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -710,9 +710,9 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
return // already set, don't apply defaults. return // already set, don't apply defaults.
} }
cfg.BootstrapNodes = make([]*discover.Node, 0, len(urls)) cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls))
for _, url := range urls { for _, url := range urls {
node, err := discover.ParseNode(url) node, err := enode.ParseV4(url)
if err != nil { if err != nil {
log.Crit("Bootstrap URL invalid", "enode", url, "err", err) log.Crit("Bootstrap URL invalid", "enode", url, "err", err)
} }
@ -1137,11 +1137,14 @@ func checkExclusive(ctx *cli.Context, args ...interface{}) {
if i+1 < len(args) { if i+1 < len(args) {
switch option := args[i+1].(type) { switch option := args[i+1].(type) {
case string: case string:
// Extended flag, expand the name and shift the arguments // Extended flag check, make sure value set doesn't conflict with passed in option
if ctx.GlobalString(flag.GetName()) == option { if ctx.GlobalString(flag.GetName()) == option {
name += "=" + option name += "=" + option
set = append(set, "--"+name)
} }
// shift arguments and continue
i++ i++
continue
case cli.Flag: case cli.Flag:
default: default:

View File

@ -41,7 +41,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/whisper/mailserver" "github.com/ethereum/go-ethereum/whisper/mailserver"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6" whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
@ -175,7 +175,7 @@ func initialize() {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*argVerbosity), log.StreamHandler(os.Stderr, log.TerminalFormat(false)))) log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*argVerbosity), log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
done = make(chan struct{}) done = make(chan struct{})
var peers []*discover.Node var peers []*enode.Node
var err error var err error
if *generateKey { if *generateKey {
@ -203,7 +203,7 @@ func initialize() {
if len(*argEnode) == 0 { if len(*argEnode) == 0 {
argEnode = scanLineA("Please enter the peer's enode: ") argEnode = scanLineA("Please enter the peer's enode: ")
} }
peer := discover.MustParseNode(*argEnode) peer := enode.MustParseV4(*argEnode)
peers = append(peers, peer) peers = append(peers, peer)
} }
@ -747,11 +747,11 @@ func requestExpiredMessagesLoop() {
} }
func extractIDFromEnode(s string) []byte { func extractIDFromEnode(s string) []byte {
n, err := discover.ParseNode(s) n, err := enode.ParseV4(s)
if err != nil { if err != nil {
utils.Fatalf("Failed to parse enode: %s", err) utils.Fatalf("Failed to parse enode: %s", err)
} }
return n.ID[:] return n.ID().Bytes()
} }
// obfuscateBloom adds 16 random bits to the bloom // obfuscateBloom adds 16 random bits to the bloom

View File

@ -151,6 +151,38 @@ func (self *ENS) Resolve(name string) (common.Hash, error) {
return common.BytesToHash(ret[:]), nil return common.BytesToHash(ret[:]), nil
} }
// Addr is a non-transactional call that returns the address associated with a name.
func (self *ENS) Addr(name string) (common.Address, error) {
node := EnsNode(name)
resolver, err := self.getResolver(node)
if err != nil {
return common.Address{}, err
}
ret, err := resolver.Addr(node)
if err != nil {
return common.Address{}, err
}
return common.BytesToAddress(ret[:]), nil
}
// SetAddr sets the address associated with a name. Only works if the caller
// owns the name, and the associated resolver implements a `setAddress` function.
func (self *ENS) SetAddr(name string, addr common.Address) (*types.Transaction, error) {
node := EnsNode(name)
resolver, err := self.getResolver(node)
if err != nil {
return nil, err
}
opts := self.TransactOpts
opts.GasLimit = 200000
return resolver.Contract.SetAddr(&opts, node, addr)
}
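// Illustrative usage of the two calls above (name and address are hypothetical):
//   addr, err := ens.Addr("example.eth")        // read the address record for a name
//   tx, err := ens.SetAddr("example.eth", addr) // only succeeds if the caller owns the name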
// Register registers a new domain name for the caller, making them the owner of the new name. // Register registers a new domain name for the caller, making them the owner of the new name.
// Only works if the registrar for the parent domain implements the FIFS registrar protocol. // Only works if the registrar for the parent domain implements the FIFS registrar protocol.
func (self *ENS) Register(name string) (*types.Transaction, error) { func (self *ENS) Register(name string) (*types.Transaction, error) {

View File

@ -109,9 +109,9 @@ func PrintDisassembled(code string) error {
it := NewInstructionIterator(script) it := NewInstructionIterator(script)
for it.Next() { for it.Next() {
if it.Arg() != nil && 0 < len(it.Arg()) { if it.Arg() != nil && 0 < len(it.Arg()) {
fmt.Printf("%06v: %v 0x%x\n", it.PC(), it.Op(), it.Arg()) fmt.Printf("%05x: %v 0x%x\n", it.PC(), it.Op(), it.Arg())
} else { } else {
fmt.Printf("%06v: %v\n", it.PC(), it.Op()) fmt.Printf("%05x: %v\n", it.PC(), it.Op())
} }
} }
return it.Error() return it.Error()
@ -124,9 +124,9 @@ func Disassemble(script []byte) ([]string, error) {
it := NewInstructionIterator(script) it := NewInstructionIterator(script)
for it.Next() { for it.Next() {
if it.Arg() != nil && 0 < len(it.Arg()) { if it.Arg() != nil && 0 < len(it.Arg()) {
instrs = append(instrs, fmt.Sprintf("%06v: %v 0x%x\n", it.PC(), it.Op(), it.Arg())) instrs = append(instrs, fmt.Sprintf("%05x: %v 0x%x\n", it.PC(), it.Op(), it.Arg()))
} else { } else {
instrs = append(instrs, fmt.Sprintf("%06v: %v\n", it.PC(), it.Op())) instrs = append(instrs, fmt.Sprintf("%05x: %v\n", it.PC(), it.Op()))
} }
} }
if err := it.Error(); err != nil { if err := it.Error(); err != nil {

View File

@ -55,6 +55,7 @@ var (
const ( const (
bodyCacheLimit = 256 bodyCacheLimit = 256
blockCacheLimit = 256 blockCacheLimit = 256
receiptsCacheLimit = 32
maxFutureBlocks = 256 maxFutureBlocks = 256
maxTimeFutureBlocks = 30 maxTimeFutureBlocks = 30
badBlockLimit = 10 badBlockLimit = 10
@ -111,11 +112,12 @@ type BlockChain struct {
currentBlock atomic.Value // Current head of the block chain currentBlock atomic.Value // Current head of the block chain
currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
stateCache state.Database // State database to reuse between imports (contains state cache) stateCache state.Database // State database to reuse between imports (contains state cache)
bodyCache *lru.Cache // Cache for the most recent block bodies bodyCache *lru.Cache // Cache for the most recent block bodies
bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
blockCache *lru.Cache // Cache for the most recent entire blocks receiptsCache *lru.Cache // Cache for the most recent receipts per block
futureBlocks *lru.Cache // future blocks are blocks added for later processing blockCache *lru.Cache // Cache for the most recent entire blocks
futureBlocks *lru.Cache // future blocks are blocks added for later processing
quit chan struct{} // blockchain quit channel quit chan struct{} // blockchain quit channel
running int32 // running must be called atomically running int32 // running must be called atomically
@ -144,6 +146,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
} }
bodyCache, _ := lru.New(bodyCacheLimit) bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit) bodyRLPCache, _ := lru.New(bodyCacheLimit)
receiptsCache, _ := lru.New(receiptsCacheLimit)
blockCache, _ := lru.New(blockCacheLimit) blockCache, _ := lru.New(blockCacheLimit)
futureBlocks, _ := lru.New(maxFutureBlocks) futureBlocks, _ := lru.New(maxFutureBlocks)
badBlocks, _ := lru.New(badBlockLimit) badBlocks, _ := lru.New(badBlockLimit)
@ -158,6 +161,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
shouldPreserve: shouldPreserve, shouldPreserve: shouldPreserve,
bodyCache: bodyCache, bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache, bodyRLPCache: bodyRLPCache,
receiptsCache: receiptsCache,
blockCache: blockCache, blockCache: blockCache,
futureBlocks: futureBlocks, futureBlocks: futureBlocks,
engine: engine, engine: engine,
@ -280,6 +284,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
// Clear out any stale content from the caches // Clear out any stale content from the caches
bc.bodyCache.Purge() bc.bodyCache.Purge()
bc.bodyRLPCache.Purge() bc.bodyRLPCache.Purge()
bc.receiptsCache.Purge()
bc.blockCache.Purge() bc.blockCache.Purge()
bc.futureBlocks.Purge() bc.futureBlocks.Purge()
@ -603,11 +608,18 @@ func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
// GetReceiptsByHash retrieves the receipts for all transactions in a given block. // GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
if receipts, ok := bc.receiptsCache.Get(hash); ok {
return receipts.(types.Receipts)
}
number := rawdb.ReadHeaderNumber(bc.db, hash) number := rawdb.ReadHeaderNumber(bc.db, hash)
if number == nil { if number == nil {
return nil return nil
} }
return rawdb.ReadReceipts(bc.db, hash, *number)
receipts := rawdb.ReadReceipts(bc.db, hash, *number)
bc.receiptsCache.Add(hash, receipts)
return receipts
} }
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.

View File

@ -53,14 +53,14 @@ type ChainIndexerChain interface {
// CurrentHeader retrieves the latest locally known header. // CurrentHeader retrieves the latest locally known header.
CurrentHeader() *types.Header CurrentHeader() *types.Header
// SubscribeChainEvent subscribes to new head header notifications. // SubscribeChainHeadEvent subscribes to new head header notifications.
SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
} }
// ChainIndexer does a post-processing job for equally sized sections of the // ChainIndexer does a post-processing job for equally sized sections of the
// canonical chain (like BloomBits and CHT structures). A ChainIndexer is // canonical chain (like BloomBits and CHT structures). A ChainIndexer is
// connected to the blockchain through the event system by starting a // connected to the blockchain through the event system by starting a
// ChainEventLoop in a goroutine. // ChainHeadEventLoop in a goroutine.
// //
// Further child ChainIndexers can be added which use the output of the parent // Further child ChainIndexers can be added which use the output of the parent
// section indexer. These child indexers receive new head notifications only // section indexer. These child indexers receive new head notifications only
@ -142,8 +142,8 @@ func (c *ChainIndexer) AddCheckpoint(section uint64, shead common.Hash) {
// cascading background processing. Children do not need to be started, they // cascading background processing. Children do not need to be started, they
// are notified about new events by their parents. // are notified about new events by their parents.
func (c *ChainIndexer) Start(chain ChainIndexerChain) { func (c *ChainIndexer) Start(chain ChainIndexerChain) {
events := make(chan ChainEvent, 10) events := make(chan ChainHeadEvent, 10)
sub := chain.SubscribeChainEvent(events) sub := chain.SubscribeChainHeadEvent(events)
go c.eventLoop(chain.CurrentHeader(), events, sub) go c.eventLoop(chain.CurrentHeader(), events, sub)
} }
@ -190,7 +190,7 @@ func (c *ChainIndexer) Close() error {
// eventLoop is a secondary - optional - event loop of the indexer which is only // eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing // started for the outermost indexer to push chain head events into a processing
// queue. // queue.
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainEvent, sub event.Subscription) { func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) {
// Mark the chain indexer as active, requiring an additional teardown // Mark the chain indexer as active, requiring an additional teardown
atomic.StoreUint32(&c.active, 1) atomic.StoreUint32(&c.active, 1)
@ -219,13 +219,13 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainE
} }
header := ev.Block.Header() header := ev.Block.Header()
if header.ParentHash != prevHash { if header.ParentHash != prevHash {
// Reorg to the common ancestor (might not exist in light sync mode, skip reorg then) // Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly? // TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
// TODO(karalabe): This operation is expensive and might block, causing the event system to if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
// potentially also lock up. We need to do with on a different thread somehow. if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil { c.newHead(h.Number.Uint64(), true)
c.newHead(h.Number.Uint64(), true) }
} }
} }
c.newHead(header.Number.Uint64(), false) c.newHead(header.Number.Uint64(), false)

View File

@ -47,7 +47,7 @@ type Log struct {
TxIndex uint `json:"transactionIndex" gencodec:"required"` TxIndex uint `json:"transactionIndex" gencodec:"required"`
// hash of the block in which the transaction was included // hash of the block in which the transaction was included
BlockHash common.Hash `json:"blockHash"` BlockHash common.Hash `json:"blockHash"`
// index of the log in the receipt // index of the log in the block
Index uint `json:"logIndex" gencodec:"required"` Index uint `json:"logIndex" gencodec:"required"`
// The Removed field is true if this log was reverted due to a chain reorganisation. // The Removed field is true if this log was reverted due to a chain reorganisation.

View File

@ -153,16 +153,21 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
if err := dec.UnmarshalJSON(input); err != nil { if err := dec.UnmarshalJSON(input); err != nil {
return err return err
} }
var V byte
if isProtectedV(dec.V) { withSignature := dec.V.Sign() != 0 || dec.R.Sign() != 0 || dec.S.Sign() != 0
chainID := deriveChainId(dec.V).Uint64() if withSignature {
V = byte(dec.V.Uint64() - 35 - 2*chainID) var V byte
} else { if isProtectedV(dec.V) {
V = byte(dec.V.Uint64() - 27) chainID := deriveChainId(dec.V).Uint64()
} V = byte(dec.V.Uint64() - 35 - 2*chainID)
if !crypto.ValidateSignatureValues(V, dec.R, dec.S, false) { } else {
return ErrInvalidSig V = byte(dec.V.Uint64() - 27)
}
if !crypto.ValidateSignatureValues(V, dec.R, dec.S, false) {
return ErrInvalidSig
}
} }
*tx = Transaction{data: dec} *tx = Transaction{data: dec}
return nil return nil
} }

View File

@ -227,13 +227,13 @@ func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (commo
if !crypto.ValidateSignatureValues(V, R, S, homestead) { if !crypto.ValidateSignatureValues(V, R, S, homestead) {
return common.Address{}, ErrInvalidSig return common.Address{}, ErrInvalidSig
} }
// encode the snature in uncompressed format // encode the signature in uncompressed format
r, s := R.Bytes(), S.Bytes() r, s := R.Bytes(), S.Bytes()
sig := make([]byte, 65) sig := make([]byte, 65)
copy(sig[32-len(r):32], r) copy(sig[32-len(r):32], r)
copy(sig[64-len(s):64], s) copy(sig[64-len(s):64], s)
sig[64] = V sig[64] = V
// recover the public key from the snature // recover the public key from the signature
pub, err := crypto.Ecrecover(sighash[:], sig) pub, err := crypto.Ecrecover(sighash[:], sig)
if err != nil { if err != nil {
return common.Address{}, err return common.Address{}, err

View File

@ -16,34 +16,6 @@
package vm package vm
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// destinations stores one map per contract (keyed by hash of code).
// The maps contain an entry for each location of a JUMPDEST
// instruction.
type destinations map[common.Hash]bitvec
// has checks whether code has a JUMPDEST at dest.
func (d destinations) has(codehash common.Hash, code []byte, dest *big.Int) bool {
// PC cannot go beyond len(code) and certainly can't be bigger than 63bits.
// Don't bother checking for JUMPDEST in that case.
udest := dest.Uint64()
if dest.BitLen() >= 63 || udest >= uint64(len(code)) {
return false
}
m, analysed := d[codehash]
if !analysed {
m = codeBitmap(code)
d[codehash] = m
}
return OpCode(code[udest]) == JUMPDEST && m.codeSegment(udest)
}
// bitvec is a bit vector which maps bytes in a program. // bitvec is a bit vector which maps bytes in a program.
// An unset bit means the byte is an opcode, a set bit means // An unset bit means the byte is an opcode, a set bit means
// it's data (i.e. argument of PUSHxx). // it's data (i.e. argument of PUSHxx).

View File

@ -49,7 +49,8 @@ type Contract struct {
caller ContractRef caller ContractRef
self ContractRef self ContractRef
jumpdests destinations // result of JUMPDEST analysis. jumpdests map[common.Hash]bitvec // Aggregated result of JUMPDEST analysis.
analysis bitvec // Locally cached result of JUMPDEST analysis
Code []byte Code []byte
CodeHash common.Hash CodeHash common.Hash
@ -58,21 +59,17 @@ type Contract struct {
Gas uint64 Gas uint64
value *big.Int value *big.Int
Args []byte
DelegateCall bool
} }
// NewContract returns a new contract environment for the execution of EVM. // NewContract returns a new contract environment for the execution of EVM.
func NewContract(caller ContractRef, object ContractRef, value *big.Int, gas uint64) *Contract { func NewContract(caller ContractRef, object ContractRef, value *big.Int, gas uint64) *Contract {
c := &Contract{CallerAddress: caller.Address(), caller: caller, self: object, Args: nil} c := &Contract{CallerAddress: caller.Address(), caller: caller, self: object}
if parent, ok := caller.(*Contract); ok { if parent, ok := caller.(*Contract); ok {
// Reuse JUMPDEST analysis from parent context if available. // Reuse JUMPDEST analysis from parent context if available.
c.jumpdests = parent.jumpdests c.jumpdests = parent.jumpdests
} else { } else {
c.jumpdests = make(destinations) c.jumpdests = make(map[common.Hash]bitvec)
} }
// Gas should be a pointer so it can safely be reduced through the run // Gas should be a pointer so it can safely be reduced through the run
@ -84,10 +81,42 @@ func NewContract(caller ContractRef, object ContractRef, value *big.Int, gas uin
return c return c
} }
func (c *Contract) validJumpdest(dest *big.Int) bool {
udest := dest.Uint64()
// PC cannot go beyond len(code) and certainly can't be bigger than 63bits.
// Don't bother checking for JUMPDEST in that case.
if dest.BitLen() >= 63 || udest >= uint64(len(c.Code)) {
return false
}
// Only JUMPDESTs allowed for destinations
if OpCode(c.Code[udest]) != JUMPDEST {
return false
}
// Do we have a contract hash already?
if c.CodeHash != (common.Hash{}) {
// Does parent context have the analysis?
analysis, exist := c.jumpdests[c.CodeHash]
if !exist {
// Do the analysis and save in parent context
// We do not need to store it in c.analysis
analysis = codeBitmap(c.Code)
c.jumpdests[c.CodeHash] = analysis
}
return analysis.codeSegment(udest)
}
// We don't have the code hash, most likely a piece of initcode not already
// in state trie. In that case, we do an analysis, and save it locally, so
// we don't have to recalculate it for every JUMP instruction in the execution
// However, we don't save it within the parent context
if c.analysis == nil {
c.analysis = codeBitmap(c.Code)
}
return c.analysis.codeSegment(udest)
}
// AsDelegate sets the contract to be a delegate call and returns the current // AsDelegate sets the contract to be a delegate call and returns the current
// contract (for chaining calls) // contract (for chaining calls)
func (c *Contract) AsDelegate() *Contract { func (c *Contract) AsDelegate() *Contract {
c.DelegateCall = true
// NOTE: caller must, at all times be a contract. It should never happen // NOTE: caller must, at all times be a contract. It should never happen
// that caller is something other than a Contract. // that caller is something other than a Contract.
parent := c.caller.(*Contract) parent := c.caller.(*Contract)
@ -138,12 +167,6 @@ func (c *Contract) Value() *big.Int {
return c.value return c.value
} }
// SetCode sets the code to the contract
func (c *Contract) SetCode(hash common.Hash, code []byte) {
c.Code = code
c.CodeHash = hash
}
// SetCallCode sets the code of the contract and address of the backing data // SetCallCode sets the code of the contract and address of the backing data
// object // object
func (c *Contract) SetCallCode(addr *common.Address, hash common.Hash, code []byte) { func (c *Contract) SetCallCode(addr *common.Address, hash common.Hash, code []byte) {
@ -151,3 +174,11 @@ func (c *Contract) SetCallCode(addr *common.Address, hash common.Hash, code []by
c.CodeHash = hash c.CodeHash = hash
c.CodeAddr = addr c.CodeAddr = addr
} }
// SetCodeOptionalHash can be used to provide code, but it's optional to provide hash.
// In case hash is not provided, the jumpdest analysis will not be saved to the parent context
func (c *Contract) SetCodeOptionalHash(addr *common.Address, codeAndHash *codeAndHash) {
c.Code = codeAndHash.code
c.CodeHash = codeAndHash.hash
c.CodeAddr = addr
}

View File

@ -212,12 +212,12 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
evm.StateDB.CreateAccount(addr) evm.StateDB.CreateAccount(addr)
} }
evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value) evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value)
// Initialise a new contract and set the code that is to be used by the EVM. // Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only. // The contract is a scoped environment for this execution context only.
contract := NewContract(caller, to, value, gas) contract := NewContract(caller, to, value, gas)
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr)) contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
// Even if the account has no code, we need to continue because it might be a precompile
start := time.Now() start := time.Now()
// Capture the tracer start/end events in debug mode // Capture the tracer start/end events in debug mode
@ -352,8 +352,20 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
return ret, contract.Gas, err return ret, contract.Gas, err
} }
type codeAndHash struct {
code []byte
hash common.Hash
}
func (c *codeAndHash) Hash() common.Hash {
if c.hash == (common.Hash{}) {
c.hash = crypto.Keccak256Hash(c.code)
}
return c.hash
}
// create creates a new contract using code as deployment code. // create creates a new contract using code as deployment code.
func (evm *EVM) create(caller ContractRef, code []byte, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) { func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) {
// Depth check execution. Fail if we're trying to execute above the // Depth check execution. Fail if we're trying to execute above the
// limit. // limit.
if evm.depth > int(params.CallCreateDepth) { if evm.depth > int(params.CallCreateDepth) {
@ -382,14 +394,14 @@ func (evm *EVM) create(caller ContractRef, code []byte, gas uint64, value *big.I
// EVM. The contract is a scoped environment for this execution context // EVM. The contract is a scoped environment for this execution context
// only. // only.
contract := NewContract(caller, AccountRef(address), value, gas) contract := NewContract(caller, AccountRef(address), value, gas)
contract.SetCallCode(&address, crypto.Keccak256Hash(code), code) contract.SetCodeOptionalHash(&address, codeAndHash)
if evm.vmConfig.NoRecursion && evm.depth > 0 { if evm.vmConfig.NoRecursion && evm.depth > 0 {
return nil, address, gas, nil return nil, address, gas, nil
} }
if evm.vmConfig.Debug && evm.depth == 0 { if evm.vmConfig.Debug && evm.depth == 0 {
evm.vmConfig.Tracer.CaptureStart(caller.Address(), address, true, code, gas, value) evm.vmConfig.Tracer.CaptureStart(caller.Address(), address, true, codeAndHash.code, gas, value)
} }
start := time.Now() start := time.Now()
@ -433,7 +445,7 @@ func (evm *EVM) create(caller ContractRef, code []byte, gas uint64, value *big.I
// Create creates a new contract using code as deployment code. // Create creates a new contract using code as deployment code.
func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetNonce(caller.Address())) contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetNonce(caller.Address()))
return evm.create(caller, code, gas, value, contractAddr) return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr)
} }
// Create2 creates a new contract using code as deployment code. // Create2 creates a new contract using code as deployment code.
@ -441,8 +453,9 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
// The difference between Create2 and Create is that Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:] // The difference between Create2 and Create is that Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at. // instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
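// A rough sketch of that derivation (it mirrors crypto.CreateAddress2 further down in this change):
//   inithash := crypto.Keccak256(initCode)
//   addr := common.BytesToAddress(crypto.Keccak256([]byte{0xff}, sender.Bytes(), salt[:], inithash)[12:])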
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), code) codeAndHash := &codeAndHash{code: code}
return evm.create(caller, code, gas, endowment, contractAddr) contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), codeAndHash.Hash().Bytes())
return evm.create(caller, codeAndHash, gas, endowment, contractAddr)
} }
// ChainConfig returns the environment's chain configuration // ChainConfig returns the environment's chain configuration

View File

@ -347,6 +347,17 @@ func gasCreate2(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack,
if gas, overflow = math.SafeAdd(gas, params.Create2Gas); overflow { if gas, overflow = math.SafeAdd(gas, params.Create2Gas); overflow {
return 0, errGasUintOverflow return 0, errGasUintOverflow
} }
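// stack.Back(2) is the initcode size; charge Sha3WordGas per 32-byte word of it,
// since CREATE2 must hash the initcode to derive the new contract address.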
wordGas, overflow := bigUint64(stack.Back(2))
if overflow {
return 0, errGasUintOverflow
}
if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Sha3WordGas); overflow {
return 0, errGasUintOverflow
}
if gas, overflow = math.SafeAdd(gas, wordGas); overflow {
return 0, errGasUintOverflow
}
return gas, nil return gas, nil
} }

View File

@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
@ -373,13 +373,20 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *
func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
offset, size := stack.pop(), stack.pop() offset, size := stack.pop(), stack.pop()
data := memory.Get(offset.Int64(), size.Int64()) data := memory.Get(offset.Int64(), size.Int64())
hash := crypto.Keccak256(data)
evm := interpreter.evm
if evm.vmConfig.EnablePreimageRecording { if interpreter.hasher == nil {
evm.StateDB.AddPreimage(common.BytesToHash(hash), data) interpreter.hasher = sha3.NewKeccak256().(keccakState)
} else {
interpreter.hasher.Reset()
} }
stack.push(interpreter.intPool.get().SetBytes(hash)) interpreter.hasher.Write(data)
interpreter.hasher.Read(interpreter.hasherBuf[:])
evm := interpreter.evm
if evm.vmConfig.EnablePreimageRecording {
evm.StateDB.AddPreimage(interpreter.hasherBuf, data)
}
stack.push(interpreter.intPool.get().SetBytes(interpreter.hasherBuf[:]))
interpreter.intPool.put(offset, size) interpreter.intPool.put(offset, size)
return nil, nil return nil, nil
@ -620,7 +627,7 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memor
func opJump(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { func opJump(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
pos := stack.pop() pos := stack.pop()
if !contract.jumpdests.has(contract.CodeHash, contract.Code, pos) { if !contract.validJumpdest(pos) {
nop := contract.GetOp(pos.Uint64()) nop := contract.GetOp(pos.Uint64())
return nil, fmt.Errorf("invalid jump destination (%v) %v", nop, pos) return nil, fmt.Errorf("invalid jump destination (%v) %v", nop, pos)
} }
@ -633,7 +640,7 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory
func opJumpi(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { func opJumpi(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
pos, cond := stack.pop(), stack.pop() pos, cond := stack.pop(), stack.pop()
if cond.Sign() != 0 { if cond.Sign() != 0 {
if !contract.jumpdests.has(contract.CodeHash, contract.Code, pos) { if !contract.validJumpdest(pos) {
nop := contract.GetOp(pos.Uint64()) nop := contract.GetOp(pos.Uint64())
return nil, fmt.Errorf("invalid jump destination (%v) %v", nop, pos) return nil, fmt.Errorf("invalid jump destination (%v) %v", nop, pos)
} }
@ -727,7 +734,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memo
} }
func opCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { func opCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
// Pop gas. The actual gas in in interpreter.evm.callGasTemp. // Pop gas. The actual gas in interpreter.evm.callGasTemp.
interpreter.intPool.put(stack.pop()) interpreter.intPool.put(stack.pop())
gas := interpreter.evm.callGasTemp gas := interpreter.evm.callGasTemp
// Pop other call parameters. // Pop other call parameters.

View File

@ -18,8 +18,10 @@ package vm
import ( import (
"fmt" "fmt"
"hash"
"sync/atomic" "sync/atomic"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
@ -68,12 +70,24 @@ type Interpreter interface {
CanRun([]byte) bool CanRun([]byte) bool
} }
// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports
// Read to get a variable amount of data from the hash state. Read is faster than Sum
// because it doesn't copy the internal state, but also modifies the internal state.
type keccakState interface {
hash.Hash
Read([]byte) (int, error)
}
// EVMInterpreter represents an EVM interpreter // EVMInterpreter represents an EVM interpreter
type EVMInterpreter struct { type EVMInterpreter struct {
evm *EVM evm *EVM
cfg Config cfg Config
gasTable params.GasTable gasTable params.GasTable
intPool *intPool
intPool *intPool
hasher keccakState // Keccak256 hasher instance shared across opcodes
hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
readOnly bool // Whether to throw on stateful modifications readOnly bool // Whether to throw on stateful modifications
returnData []byte // Last CALL's return data for subsequent reuse returnData []byte // Last CALL's return data for subsequent reuse

View File

@ -29,7 +29,7 @@ type Memory struct {
lastGasCost uint64 lastGasCost uint64
} }
// NewMemory returns a new memory memory model. // NewMemory returns a new memory model.
func NewMemory() *Memory { func NewMemory() *Memory {
return &Memory{} return &Memory{}
} }

View File

@ -77,9 +77,9 @@ func CreateAddress(b common.Address, nonce uint64) common.Address {
} }
// CreateAddress2 creates an ethereum address given the address bytes, initial // CreateAddress2 creates an ethereum address given the address bytes, initial
// contract code and a salt. // contract code hash and a salt.
func CreateAddress2(b common.Address, salt [32]byte, code []byte) common.Address { func CreateAddress2(b common.Address, salt [32]byte, inithash []byte) common.Address {
return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt[:], Keccak256(code))[12:]) return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt[:], inithash)[12:])
} }
// ToECDSA creates a private key with the given D value. // ToECDSA creates a private key with the given D value.

View File

@ -54,7 +54,7 @@ static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const se
even if r was negative. */ even if r was negative. */
static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m); static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m);
/** Right-shift the passed number by bits bits. */ /** Right-shift the passed number by bits. */
static void secp256k1_num_shift(secp256k1_num *r, int bits); static void secp256k1_num_shift(secp256k1_num *r, int bits);
/** Check whether a number is zero. */ /** Check whether a number is zero. */

View File

@ -67,6 +67,15 @@ func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 {
return hexutil.Uint64(api.e.Miner().HashRate()) return hexutil.Uint64(api.e.Miner().HashRate())
} }
// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config.
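// (Served over JSON-RPC as eth_chainId; for example, a mainnet node past the EIP-155 fork block reports 0x1.)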
func (api *PublicEthereumAPI) ChainId() hexutil.Uint64 {
chainID := new(big.Int)
if config := api.e.chainConfig; config.IsEIP155(api.e.blockchain.CurrentBlock().Number()) {
chainID = config.ChainID
}
return (hexutil.Uint64)(chainID.Uint64())
}
// PublicMinerAPI provides an API to control the miner. // PublicMinerAPI provides an API to control the miner.
// It offers only methods that operate on data that pose no security risk when it is publicly accessible. // It offers only methods that operate on data that pose no security risk when it is publicly accessible.
type PublicMinerAPI struct { type PublicMinerAPI struct {
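The new eth_chainId endpoint returns the EIP-155 chain id as a hex-encoded quantity. A hedged sketch of querying it over JSON-RPC with go-ethereum's rpc client (the node URL is an assumption):

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // hypothetical local node
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var id hexutil.Uint64
	if err := client.Call(&id, "eth_chainId"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("chain id:", uint64(id)) // 0 if the chain is not yet past its EIP-155 block
}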

View File

@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
@ -107,18 +106,11 @@ func (b *EthAPIBackend) GetBlock(ctx context.Context, hash common.Hash) (*types.
} }
func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
if number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash); number != nil { return b.eth.blockchain.GetReceiptsByHash(hash), nil
return rawdb.ReadReceipts(b.eth.chainDb, hash, *number), nil
}
return nil, nil
} }
func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash) receipts := b.eth.blockchain.GetReceiptsByHash(hash)
if number == nil {
return nil, nil
}
receipts := rawdb.ReadReceipts(b.eth.chainDb, hash, *number)
if receipts == nil { if receipts == nil {
return nil, nil return nil, nil
} }

View File

@ -391,6 +391,15 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string,
return api.TraceBlock(ctx, blob, config) return api.TraceBlock(ctx, blob, config)
} }
// TraceBadBlock returns the structured logs created during the execution of a block
// within the blockchain 'badblocks' cache
func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, index int, config *TraceConfig) ([]*txTraceResult, error) {
if blocks := api.eth.blockchain.BadBlocks(); index < len(blocks) {
return api.traceBlock(ctx, blocks[index], config)
}
return nil, fmt.Errorf("index out of range")
}
// traceBlock configures a new tracer according to the provided configuration, and // traceBlock configures a new tracer according to the provided configuration, and
// executes all the transactions contained within. The return value will be one item // executes all the transactions contained within. The return value will be one item
// per transaction, dependent on the requested tracer. // per transaction, dependent on the requested tracer.
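debug_traceBadBlock takes an index into the node's bad-blocks cache rather than a hash or number. A hedged sketch of invoking it over RPC (the endpoint URL and the raw-JSON result handling are assumptions; the optional trace config is simply omitted):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // hypothetical node with the debug API enabled
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var traces json.RawMessage
	// Trace the first entry of the 'badblocks' cache; the call errors with
	// "index out of range" if the cache holds fewer blocks.
	if err := client.Call(&traces, "debug_traceBadBlock", 0); err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(traces))
}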

View File

@ -60,6 +60,9 @@ var (
maxHeadersProcess = 2048 // Number of header download results to import at once into the chain maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
maxResultsProcess = 2048 // Number of content download results to import at once into the chain maxResultsProcess = 2048 // Number of content download results to import at once into the chain
reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it
@ -674,8 +677,10 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
continue continue
} }
// Otherwise check if we already know the header or not // Otherwise check if we already know the header or not
if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) { h := headers[i].Hash()
number, hash = headers[i].Number.Uint64(), headers[i].Hash() n := headers[i].Number.Uint64()
if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && d.lightchain.HasHeader(h, n)) {
number, hash = n, h
// If every header is known, even future ones, the peer straight out lied about its head // If every header is known, even future ones, the peer straight out lied about its head
if number > height && i == limit-1 { if number > height && i == limit-1 {
@ -739,11 +744,13 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
arrived = true arrived = true
// Modify the search interval based on the response // Modify the search interval based on the response
if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) { h := headers[0].Hash()
n := headers[0].Number.Uint64()
if (d.mode == FullSync && !d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && !d.lightchain.HasHeader(h, n)) {
end = check end = check
break break
} }
header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
if header.Number.Uint64() != check { if header.Number.Uint64() != check {
p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
return 0, errBadPeer return 0, errBadPeer
@ -859,6 +866,30 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
} }
headers = filled[proced:] headers = filled[proced:]
from += uint64(proced) from += uint64(proced)
} else {
// If we're closing in on the chain head, but haven't yet reached it, delay
// the last few headers so mini reorgs on the head don't cause invalid hash
// chain errors.
if n := len(headers); n > 0 {
// Retrieve the current head we're at
head := uint64(0)
if d.mode == LightSync {
head = d.lightchain.CurrentHeader().Number.Uint64()
} else {
head = d.blockchain.CurrentFastBlock().NumberU64()
if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
head = full
}
}
// If the head is way older than this batch, delay the last few headers
if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
delay := reorgProtHeaderDelay
if delay > n {
delay = n
}
headers = headers[:n-delay]
}
}
} }
// Insert all the new headers and fetch the next batch // Insert all the new headers and fetch the next batch
if len(headers) > 0 { if len(headers) > 0 {
@ -869,8 +900,18 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
return errCancelHeaderFetch return errCancelHeaderFetch
} }
from += uint64(len(headers)) from += uint64(len(headers))
getHeaders(from)
} else {
// No headers delivered, or all of them being delayed, sleep a bit and retry
p.log.Trace("All headers delayed, waiting")
select {
case <-time.After(fsHeaderContCheck):
getHeaders(from)
continue
case <-d.cancelCh:
return errCancelHeaderFetch
}
} }
getHeaders(from)
case <-timeout.C: case <-timeout.C:
if d.dropPeer == nil { if d.dropPeer == nil {
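The reorg-protection branch above withholds the tail of a header batch whenever the local head lags the batch's last header by more than reorgProtThreshold blocks, so a mini reorg at the network head cannot produce an invalid hash chain error. A simplified, standalone sketch of just that trimming rule (plain block numbers stand in for full headers):

package main

import "fmt"

const (
	reorgProtThreshold   = 48 // recent-block window in which protection applies
	reorgProtHeaderDelay = 2  // headers withheld to absorb mini reorgs
)

// delayNearHead mirrors the condition used above: if the local head is far
// behind the last header of the batch, hold back the final few headers.
func delayNearHead(headers []uint64, head uint64) []uint64 {
	n := len(headers)
	if n == 0 {
		return headers
	}
	if head+reorgProtThreshold < headers[n-1] {
		delay := reorgProtHeaderDelay
		if delay > n {
			delay = n
		}
		return headers[:n-delay]
	}
	return headers
}

func main() {
	batch := []uint64{1000, 1001, 1002, 1003}
	fmt.Println(delayNearHead(batch, 10))   // far behind: last 2 headers withheld
	fmt.Println(delayNearHead(batch, 1002)) // within the window: full batch delivered
}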

View File

@ -53,6 +53,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.NoPruning = c.NoPruning enc.NoPruning = c.NoPruning
enc.LightServ = c.LightServ enc.LightServ = c.LightServ
enc.LightPeers = c.LightPeers enc.LightPeers = c.LightPeers
enc.OnlyAnnounce = c.OnlyAnnounce
enc.ULC = c.ULC
enc.SkipBcVersionCheck = c.SkipBcVersionCheck enc.SkipBcVersionCheck = c.SkipBcVersionCheck
enc.DatabaseHandles = c.DatabaseHandles enc.DatabaseHandles = c.DatabaseHandles
enc.DatabaseCache = c.DatabaseCache enc.DatabaseCache = c.DatabaseCache

View File

@ -37,7 +37,7 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -49,6 +49,9 @@ const (
// txChanSize is the size of channel listening to NewTxsEvent. // txChanSize is the size of channel listening to NewTxsEvent.
// The number is referenced from the size of tx pool. // The number is referenced from the size of tx pool.
txChanSize = 4096 txChanSize = 4096
// minimum number of peers to broadcast new blocks to
minBroadcastPeers = 4
) )
var ( var (
@ -147,7 +150,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
NodeInfo: func() interface{} { NodeInfo: func() interface{} {
return manager.NodeInfo() return manager.NodeInfo()
}, },
PeerInfo: func(id discover.NodeID) interface{} { PeerInfo: func(id enode.ID) interface{} {
if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil { if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
return p.Info() return p.Info()
} }
@ -708,7 +711,14 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
return return
} }
// Send the block to a subset of our peers // Send the block to a subset of our peers
transfer := peers[:int(math.Sqrt(float64(len(peers))))] transferLen := int(math.Sqrt(float64(len(peers))))
if transferLen < minBroadcastPeers {
transferLen = minBroadcastPeers
}
if transferLen > len(peers) {
transferLen = len(peers)
}
transfer := peers[:transferLen]
for _, peer := range transfer { for _, peer := range transfer {
peer.AsyncSendNewBlock(block, td) peer.AsyncSendNewBlock(block, td)
} }
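The change above keeps propagating full blocks to roughly sqrt(len(peers)) peers but now enforces a floor of minBroadcastPeers, capped at the actual peer count. A small sketch of that selection rule in isolation:

package main

import (
	"fmt"
	"math"
)

const minBroadcastPeers = 4

// broadcastCount returns how many peers receive the full block.
func broadcastCount(peers int) int {
	n := int(math.Sqrt(float64(peers)))
	if n < minBroadcastPeers {
		n = minBroadcastPeers
	}
	if n > peers {
		n = peers
	}
	return n
}

func main() {
	for _, p := range []int{2, 5, 25, 100} {
		fmt.Printf("%d peers -> full block sent to %d\n", p, broadcastCount(p))
	}
}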

View File

@ -37,7 +37,7 @@ const (
// ProtocolName is the official short name of the protocol used during capability negotiation. // ProtocolName is the official short name of the protocol used during capability negotiation.
var ProtocolName = "eth" var ProtocolName = "eth"
// ProtocolVersions are the upported versions of the eth protocol (first is primary). // ProtocolVersions are the supported versions of the eth protocol (first is primary).
var ProtocolVersions = []uint{eth63, eth62} var ProtocolVersions = []uint{eth63, eth62}
// ProtocolLengths are the number of implemented messages corresponding to different protocol versions. // ProtocolLengths are the number of implemented messages corresponding to different protocol versions.

View File

@ -25,7 +25,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
) )
const ( const (
@ -64,7 +64,7 @@ func (pm *ProtocolManager) syncTransactions(p *peer) {
// the transactions in small packs to one peer at a time. // the transactions in small packs to one peer at a time.
func (pm *ProtocolManager) txsyncLoop() { func (pm *ProtocolManager) txsyncLoop() {
var ( var (
pending = make(map[discover.NodeID]*txsync) pending = make(map[enode.ID]*txsync)
sending = false // whether a send is active sending = false // whether a send is active
pack = new(txsync) // the pack that is being sent pack = new(txsync) // the pack that is being sent
done = make(chan error, 1) // result of the send done = make(chan error, 1) // result of the send

View File

@ -25,11 +25,11 @@ import (
"runtime" "runtime"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/log/term"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/exp" "github.com/ethereum/go-ethereum/metrics/exp"
"github.com/fjl/memsize/memsizeui" "github.com/fjl/memsize/memsizeui"
colorable "github.com/mattn/go-colorable" colorable "github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )
@ -101,7 +101,7 @@ var (
) )
func init() { func init() {
usecolor := term.IsTty(os.Stderr.Fd()) && os.Getenv("TERM") != "dumb" usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
output := io.Writer(os.Stderr) output := io.Writer(os.Stderr)
if usecolor { if usecolor {
output = colorable.NewColorableStderr() output = colorable.NewColorableStderr()

View File

@ -457,7 +457,7 @@ func (s *PrivateAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr c
// addr = ecrecover(hash, signature) // addr = ecrecover(hash, signature)
// //
// Note, the signature must conform to the secp256k1 curve R, S and V values, where // Note, the signature must conform to the secp256k1 curve R, S and V values, where
// the V value must be be 27 or 28 for legacy reasons. // the V value must be 27 or 28 for legacy reasons.
// //
// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover // https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover
func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) { func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) {

View File

@ -1021,7 +1021,7 @@ var formatOutputInt = function (param) {
var value = param.staticPart() || "0"; var value = param.staticPart() || "0";
// check if it's negative number // check if it's negative number
// it it is, return two's complement // it is, return two's complement
if (signedIsNegative(value)) { if (signedIsNegative(value)) {
return new BigNumber(value, 16).minus(new BigNumber('ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff', 16)).minus(1); return new BigNumber(value, 16).minus(new BigNumber('ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff', 16)).minus(1);
} }
@ -2250,7 +2250,7 @@ var isAddress = function (address) {
// check if it has the basic requirements of an address // check if it has the basic requirements of an address
return false; return false;
} else if (/^(0x)?[0-9a-f]{40}$/.test(address) || /^(0x)?[0-9A-F]{40}$/.test(address)) { } else if (/^(0x)?[0-9a-f]{40}$/.test(address) || /^(0x)?[0-9A-F]{40}$/.test(address)) {
// If it's all small caps or all all caps, return true // If it's all small caps or all caps, return true
return true; return true;
} else { } else {
// Otherwise check each case // Otherwise check each case

View File

@ -378,6 +378,12 @@ web3._extend({
params: 2, params: 2,
inputFormatter: [null, null] inputFormatter: [null, null]
}), }),
new web3._extend.Method({
name: 'traceBadBlock',
call: 'debug_traceBadBlock',
params: 1,
inputFormatter: [null]
}),
new web3._extend.Method({ new web3._extend.Method({
name: 'traceBlockByNumber', name: 'traceBlockByNumber',
call: 'debug_traceBlockByNumber', call: 'debug_traceBlockByNumber',
@ -433,6 +439,11 @@ const Eth_JS = `
web3._extend({ web3._extend({
property: 'eth', property: 'eth',
methods: [ methods: [
new web3._extend.Method({
name: 'chainId',
call: 'eth_chainId',
params: 0
}),
new web3._extend.Method({ new web3._extend.Method({
name: 'sign', name: 'sign',
call: 'eth_sign', call: 'eth_sign',

View File

@ -20,6 +20,7 @@ package les
import ( import (
"fmt" "fmt"
"sync" "sync"
"time"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -161,6 +162,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
} }
if leth.protocolManager.isULCEnabled() { if leth.protocolManager.isULCEnabled() {
log.Warn("Ultra light client is enabled", "trustedNodes", len(leth.protocolManager.ulc.trustedKeys), "minTrustedFraction", leth.protocolManager.ulc.minTrustedFraction)
leth.blockchain.DisableCheckFreq() leth.blockchain.DisableCheckFreq()
} }
leth.ApiBackend = &LesApiBackend{leth, nil} leth.ApiBackend = &LesApiBackend{leth, nil}
@ -279,6 +281,7 @@ func (s *LightEthereum) Stop() error {
s.eventMux.Stop() s.eventMux.Stop()
time.Sleep(time.Millisecond * 200)
s.chainDb.Close() s.chainDb.Close()
close(s.shutdownChan) close(s.shutdownChan)

View File

@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
@ -63,7 +63,7 @@ func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol {
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
return c.protocolManager.runPeer(version, p, rw) return c.protocolManager.runPeer(version, p, rw)
}, },
PeerInfo: func(id discover.NodeID) interface{} { PeerInfo: func(id enode.ID) interface{} {
if p := c.protocolManager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil { if p := c.protocolManager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
return p.Info() return p.Info()
} }

View File

@ -32,8 +32,9 @@ import (
) )
const ( const (
blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others blockDelayTimeout = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
maxNodeCount = 20 // maximum number of fetcherTreeNode entries remembered for each peer maxNodeCount = 20 // maximum number of fetcherTreeNode entries remembered for each peer
serverStateAvailable = 100 // number of recent blocks where state availability is assumed
) )
// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the // lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
@ -155,6 +156,7 @@ func (f *lightFetcher) syncLoop() {
if !f.syncing && !(newAnnounce && s) { if !f.syncing && !(newAnnounce && s) {
rq, reqID = f.nextRequest() rq, reqID = f.nextRequest()
} }
syncing := f.syncing syncing := f.syncing
f.lock.Unlock() f.lock.Unlock()
@ -217,7 +219,7 @@ func (f *lightFetcher) syncLoop() {
res, h, td := f.checkSyncedHeaders(p) res, h, td := f.checkSyncedHeaders(p)
f.syncing = false f.syncing = false
if res { if res {
f.newHeaders(h, []*big.Int{td}) f.newHeaders([]*types.Header{h}, []*big.Int{td})
} }
f.lock.Unlock() f.lock.Unlock()
} }
@ -227,8 +229,8 @@ func (f *lightFetcher) syncLoop() {
// registerPeer adds a new peer to the fetcher's peer set // registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) { func (f *lightFetcher) registerPeer(p *peer) {
p.lock.Lock() p.lock.Lock()
p.hasBlock = func(hash common.Hash, number uint64) bool { p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
return f.peerHasBlock(p, hash, number) return f.peerHasBlock(p, hash, number, hasState)
} }
p.lock.Unlock() p.lock.Unlock()
@ -358,21 +360,27 @@ func (f *lightFetcher) announce(p *peer, head *announceData) {
// peerHasBlock returns true if we can assume the peer knows the given block // peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements // based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bool { func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
fp := f.peers[p]
if fp == nil || fp.root == nil {
return false
}
if hasState {
if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
return false
}
}
if f.syncing { if f.syncing {
// always return true when syncing // always return true when syncing
// false positives are acceptable, a more sophisticated condition can be implemented later // false positives are acceptable, a more sophisticated condition can be implemented later
return true return true
} }
fp := f.peers[p]
if fp == nil || fp.root == nil {
return false
}
if number >= fp.root.number { if number >= fp.root.number {
// it is recent enough that if it is known, it should be in the peer's block tree // it is recent enough that if it is known, it should be in the peer's block tree
return fp.nodeByHash[hash] != nil return fp.nodeByHash[hash] != nil
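With the new hasState flag, a peer is only assumed to still hold the state of a block if that block is within serverStateAvailable blocks of the peer's last announced head. A tiny sketch of just that recency gate (reducing the check to plain block numbers is an assumption of the sketch):

package main

import "fmt"

const serverStateAvailable = 100 // recent blocks for which server state is assumed

// likelyHasState mirrors the check above: the requested block must not be more
// than serverStateAvailable blocks behind the peer's last announced head.
func likelyHasState(lastAnnounced, number uint64) bool {
	return lastAnnounced <= number+serverStateAvailable
}

func main() {
	fmt.Println(likelyHasState(1000000, 999950)) // true: state should still be available
	fmt.Println(likelyHasState(1000000, 999800)) // false: state has likely been pruned on the server
}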
@ -450,7 +458,7 @@ func (f *lightFetcher) findBestValues() (bestHash common.Hash, bestAmount uint64
//if ulc mode is disabled, isTrustedHash returns true //if ulc mode is disabled, isTrustedHash returns true
amount := f.requestAmount(p, n) amount := f.requestAmount(p, n)
if (bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount) && f.isTrustedHash(hash) { if (bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount) && (f.isTrustedHash(hash) || f.maxConfirmedTd.Int64() == 0) {
bestHash = hash bestHash = hash
bestTd = n.td bestTd = n.td
bestAmount = amount bestAmount = amount
@ -689,13 +697,16 @@ func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*typ
// checkSyncedHeaders updates peer's block tree after synchronisation by marking // checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after // downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped. // syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) (bool, []*types.Header, *big.Int) { func (f *lightFetcher) checkSyncedHeaders(p *peer) (bool, *types.Header, *big.Int) {
fp := f.peers[p] fp := f.peers[p]
if fp == nil { if fp == nil {
p.Log().Debug("Unknown peer to check sync headers") p.Log().Debug("Unknown peer to check sync headers")
return false, nil, nil return false, nil, nil
} }
n := fp.lastAnnounced
var td *big.Int
var h *types.Header var h *types.Header
if f.pm.isULCEnabled() { if f.pm.isULCEnabled() {
var unapprovedHashes []common.Hash var unapprovedHashes []common.Hash
@ -703,40 +714,26 @@ func (f *lightFetcher) checkSyncedHeaders(p *peer) (bool, []*types.Header, *big.
h, unapprovedHashes = f.lastTrustedTreeNode(p) h, unapprovedHashes = f.lastTrustedTreeNode(p)
//rollback untrusted blocks //rollback untrusted blocks
f.chain.Rollback(unapprovedHashes) f.chain.Rollback(unapprovedHashes)
//overwrite to last trusted
n = fp.nodeByHash[h.Hash()]
} }
n := fp.lastAnnounced //find last valid block
var td *big.Int
trustedHeaderExisted := false
//find last trusted block
for n != nil { for n != nil {
//we found last trusted header if td = f.chain.GetTd(n.hash, n.number); td != nil {
if n.hash == h.Hash() {
trustedHeaderExisted = true
}
if td = f.chain.GetTd(n.hash, n.number); td != nil && trustedHeaderExisted {
break
}
//break if we found last trusted hash before sync
if f.lastTrustedHeader == nil {
break
}
if n.hash == f.lastTrustedHeader.Hash() {
break break
} }
n = n.parent n = n.parent
} }
// Now n is the latest downloaded/approved header after syncing // Now n is the latest downloaded/approved header after syncing
if n == nil && !p.isTrusted { if n == nil {
p.Log().Debug("Synchronisation failed") p.Log().Debug("Synchronisation failed")
go f.pm.removePeer(p.id) go f.pm.removePeer(p.id)
return false, nil, nil return false, nil, nil
} }
header := f.chain.GetHeader(n.hash, n.number) header := f.chain.GetHeader(n.hash, n.number)
return true, []*types.Header{header}, td return true, header, td
} }
// lastTrustedTreeNode return last approved treeNode and a list of unapproved hashes // lastTrustedTreeNode return last approved treeNode and a list of unapproved hashes

View File

@ -238,8 +238,7 @@ func (pm *ProtocolManager) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWrit
var entry *poolEntry var entry *poolEntry
peer := pm.newPeer(int(version), pm.networkId, p, rw) peer := pm.newPeer(int(version), pm.networkId, p, rw)
if pm.serverPool != nil { if pm.serverPool != nil {
addr := p.RemoteAddr().(*net.TCPAddr) entry = pm.serverPool.connect(peer, peer.Node())
entry = pm.serverPool.connect(peer, addr.IP, uint16(addr.Port))
} }
peer.poolEntry = entry peer.poolEntry = entry
select { select {
@ -406,14 +405,13 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if p.announceType == announceTypeNone { if p.announceType == announceTypeNone {
return errResp(ErrUnexpectedResponse, "") return errResp(ErrUnexpectedResponse, "")
} }
var req announceData var req announceData
if err := msg.Decode(&req); err != nil { if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err) return errResp(ErrDecode, "%v: %v", msg, err)
} }
if p.announceType == announceTypeSigned { if p.announceType == announceTypeSigned {
if err := req.checkSignature(p.pubKey); err != nil { if err := req.checkSignature(p.ID()); err != nil {
p.Log().Trace("Invalid announcement signature", "err", err) p.Log().Trace("Invalid announcement signature", "err", err)
return err return err
} }

View File

@ -84,7 +84,7 @@ func (r *BlockRequest) GetCost(peer *peer) uint64 {
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *BlockRequest) CanSend(peer *peer) bool { func (r *BlockRequest) CanSend(peer *peer) bool {
return peer.HasBlock(r.Hash, r.Number) return peer.HasBlock(r.Hash, r.Number, false)
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
@ -140,7 +140,7 @@ func (r *ReceiptsRequest) GetCost(peer *peer) uint64 {
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *ReceiptsRequest) CanSend(peer *peer) bool { func (r *ReceiptsRequest) CanSend(peer *peer) bool {
return peer.HasBlock(r.Hash, r.Number) return peer.HasBlock(r.Hash, r.Number, false)
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
@ -202,7 +202,7 @@ func (r *TrieRequest) GetCost(peer *peer) uint64 {
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *TrieRequest) CanSend(peer *peer) bool { func (r *TrieRequest) CanSend(peer *peer) bool {
return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber) return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true)
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
@ -272,7 +272,7 @@ func (r *CodeRequest) GetCost(peer *peer) uint64 {
// CanSend tells if a certain peer is suitable for serving the given request // CanSend tells if a certain peer is suitable for serving the given request
func (r *CodeRequest) CanSend(peer *peer) bool { func (r *CodeRequest) CanSend(peer *peer) bool {
return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber) return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true)
} }
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)

View File

@ -18,7 +18,6 @@
package les package les
import ( import (
"crypto/ecdsa"
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
@ -51,7 +50,6 @@ const (
type peer struct { type peer struct {
*p2p.Peer *p2p.Peer
pubKey *ecdsa.PublicKey
rw p2p.MsgReadWriter rw p2p.MsgReadWriter
@ -69,7 +67,7 @@ type peer struct {
sendQueue *execQueue sendQueue *execQueue
poolEntry *poolEntry poolEntry *poolEntry
hasBlock func(common.Hash, uint64) bool hasBlock func(common.Hash, uint64, bool) bool
responseErrors int responseErrors int
fcClient *flowcontrol.ClientNode // nil if the peer is server only fcClient *flowcontrol.ClientNode // nil if the peer is server only
@ -83,11 +81,9 @@ type peer struct {
func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID() id := p.ID()
pubKey, _ := id.Pubkey()
return &peer{ return &peer{
Peer: p, Peer: p,
pubKey: pubKey,
rw: rw, rw: rw,
version: version, version: version,
network: network, network: network,
@ -179,11 +175,11 @@ func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
} }
// HasBlock checks if the peer has a given block // HasBlock checks if the peer has a given block
func (p *peer) HasBlock(hash common.Hash, number uint64) bool { func (p *peer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
p.lock.RLock() p.lock.RLock()
hasBlock := p.hasBlock hasBlock := p.hasBlock
p.lock.RUnlock() p.lock.RUnlock()
return hasBlock != nil && hasBlock(hash, number) return hasBlock != nil && hasBlock(hash, number, hasState)
} }
// SendAnnounce announces the availability of a number of blocks through // SendAnnounce announces the availability of a number of blocks through

View File

@ -18,9 +18,7 @@
package les package les
import ( import (
"bytes"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/elliptic"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -30,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/secp256k1" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -148,21 +146,20 @@ func (a *announceData) sign(privKey *ecdsa.PrivateKey) {
} }
// checkSignature verifies if the block announcement has a valid signature by the given pubKey // checkSignature verifies if the block announcement has a valid signature by the given pubKey
func (a *announceData) checkSignature(pubKey *ecdsa.PublicKey) error { func (a *announceData) checkSignature(id enode.ID) error {
var sig []byte var sig []byte
if err := a.Update.decode().get("sign", &sig); err != nil { if err := a.Update.decode().get("sign", &sig); err != nil {
return err return err
} }
rlp, _ := rlp.EncodeToBytes(announceBlock{a.Hash, a.Number, a.Td}) rlp, _ := rlp.EncodeToBytes(announceBlock{a.Hash, a.Number, a.Td})
recPubkey, err := secp256k1.RecoverPubkey(crypto.Keccak256(rlp), sig) recPubkey, err := crypto.SigToPub(crypto.Keccak256(rlp), sig)
if err != nil { if err != nil {
return err return err
} }
pbytes := elliptic.Marshal(pubKey.Curve, pubKey.X, pubKey.Y) if id == enode.PubkeyToIDV4(recPubkey) {
if bytes.Equal(pbytes, recPubkey) {
return nil return nil
} }
return errors.New("Wrong signature") return errors.New("wrong signature")
} }
type blockInfo struct { type blockInfo struct {
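The signature check now recovers the signer's public key with crypto.SigToPub and compares node IDs instead of raw key bytes. A self-contained sketch of that sign/verify flow, assuming go-ethereum's crypto and p2p/enode packages (the payload below is a placeholder for the real RLP-encoded announceBlock):

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}

	payload := []byte("hash|number|td") // placeholder for rlp.EncodeToBytes(announceBlock{...})
	digest := crypto.Keccak256(payload)

	sig, err := crypto.Sign(digest, key)
	if err != nil {
		log.Fatal(err)
	}

	// Verification side: recover the public key from the signature and
	// compare the derived node IDs.
	recovered, err := crypto.SigToPub(digest, sig)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature ok:", enode.PubkeyToIDV4(recovered) == enode.PubkeyToIDV4(&key.PublicKey))
}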

View File

@ -18,6 +18,7 @@
package les package les
import ( import (
"crypto/ecdsa"
"fmt" "fmt"
"io" "io"
"math" "math"
@ -28,11 +29,12 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -90,8 +92,7 @@ const (
// connReq represents a request for peer connection. // connReq represents a request for peer connection.
type connReq struct { type connReq struct {
p *peer p *peer
ip net.IP node *enode.Node
port uint16
result chan *poolEntry result chan *poolEntry
} }
@ -122,11 +123,11 @@ type serverPool struct {
topic discv5.Topic topic discv5.Topic
discSetPeriod chan time.Duration discSetPeriod chan time.Duration
discNodes chan *discv5.Node discNodes chan *enode.Node
discLookups chan bool discLookups chan bool
trustedNodes []string trustedNodes []string
entries map[discover.NodeID]*poolEntry entries map[enode.ID]*poolEntry
timeout, enableRetry chan *poolEntry timeout, enableRetry chan *poolEntry
adjustStats chan poolStatAdjust adjustStats chan poolStatAdjust
@ -145,7 +146,7 @@ func newServerPool(db ethdb.Database, quit chan struct{}, wg *sync.WaitGroup, tr
db: db, db: db,
quit: quit, quit: quit,
wg: wg, wg: wg,
entries: make(map[discover.NodeID]*poolEntry), entries: make(map[enode.ID]*poolEntry),
timeout: make(chan *poolEntry, 1), timeout: make(chan *poolEntry, 1),
adjustStats: make(chan poolStatAdjust, 100), adjustStats: make(chan poolStatAdjust, 100),
enableRetry: make(chan *poolEntry, 1), enableRetry: make(chan *poolEntry, 1),
@ -170,25 +171,42 @@ func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) {
pool.dbKey = append([]byte("serverPool/"), []byte(topic)...) pool.dbKey = append([]byte("serverPool/"), []byte(topic)...)
pool.wg.Add(1) pool.wg.Add(1)
pool.loadNodes() pool.loadNodes()
pool.connectToTrustedNodes()
if pool.server.DiscV5 != nil { if pool.server.DiscV5 != nil {
pool.discSetPeriod = make(chan time.Duration, 1) pool.discSetPeriod = make(chan time.Duration, 1)
pool.discNodes = make(chan *discv5.Node, 100) pool.discNodes = make(chan *enode.Node, 100)
pool.discLookups = make(chan bool, 100) pool.discLookups = make(chan bool, 100)
go pool.server.DiscV5.SearchTopic(pool.topic, pool.discSetPeriod, pool.discNodes, pool.discLookups) go pool.discoverNodes()
} }
pool.checkDial() pool.checkDial()
go pool.eventLoop() go pool.eventLoop()
} }
// discoverNodes wraps SearchTopic, converting result nodes to enode.Node.
func (pool *serverPool) discoverNodes() {
ch := make(chan *discv5.Node)
go func() {
pool.server.DiscV5.SearchTopic(pool.topic, pool.discSetPeriod, ch, pool.discLookups)
close(ch)
}()
for n := range ch {
pubkey, err := decodePubkey64(n.ID[:])
if err != nil {
continue
}
pool.discNodes <- enode.NewV4(pubkey, n.IP, int(n.TCP), int(n.UDP))
}
}
// connect should be called upon any incoming connection. If the connection has been // connect should be called upon any incoming connection. If the connection has been
// dialed by the server pool recently, the appropriate pool entry is returned. // dialed by the server pool recently, the appropriate pool entry is returned.
// Otherwise, the connection should be rejected. // Otherwise, the connection should be rejected.
// Note that whenever a connection has been accepted and a pool entry has been returned, // Note that whenever a connection has been accepted and a pool entry has been returned,
// disconnect should also always be called. // disconnect should also always be called.
func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry { func (pool *serverPool) connect(p *peer, node *enode.Node) *poolEntry {
log.Debug("Connect new entry", "enode", p.id) log.Debug("Connect new entry", "enode", p.id)
req := &connReq{p: p, ip: ip, port: port, result: make(chan *poolEntry, 1)} req := &connReq{p: p, node: node, result: make(chan *poolEntry, 1)}
select { select {
case pool.connCh <- req: case pool.connCh <- req:
case <-pool.quit: case <-pool.quit:
@ -199,7 +217,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
// registered should be called after a successful handshake // registered should be called after a successful handshake
func (pool *serverPool) registered(entry *poolEntry) { func (pool *serverPool) registered(entry *poolEntry) {
log.Debug("Registered new entry", "enode", entry.id) log.Debug("Registered new entry", "enode", entry.node.ID())
req := &registerReq{entry: entry, done: make(chan struct{})} req := &registerReq{entry: entry, done: make(chan struct{})}
select { select {
case pool.registerCh <- req: case pool.registerCh <- req:
@ -219,7 +237,7 @@ func (pool *serverPool) disconnect(entry *poolEntry) {
stopped = true stopped = true
default: default:
} }
log.Debug("Disconnected old entry", "enode", entry.id) log.Debug("Disconnected old entry", "enode", entry.node.ID())
req := &disconnReq{entry: entry, stopped: stopped, done: make(chan struct{})} req := &disconnReq{entry: entry, stopped: stopped, done: make(chan struct{})}
// Block until disconnection request is served. // Block until disconnection request is served.
@ -323,7 +341,7 @@ func (pool *serverPool) eventLoop() {
} }
case node := <-pool.discNodes: case node := <-pool.discNodes:
entry := pool.findOrNewNode(discover.NodeID(node.ID), node.IP, node.TCP) entry := pool.findOrNewNode(node)
pool.updateCheckDial(entry) pool.updateCheckDial(entry)
case conv := <-pool.discLookups: case conv := <-pool.discLookups:
@ -344,7 +362,7 @@ func (pool *serverPool) eventLoop() {
// Handle peer connection requests. // Handle peer connection requests.
entry := pool.entries[req.p.ID()] entry := pool.entries[req.p.ID()]
if entry == nil { if entry == nil {
entry = pool.findOrNewNode(req.p.ID(), req.ip, req.port) entry = pool.findOrNewNode(req.node)
} }
if entry.state == psConnected || entry.state == psRegistered { if entry.state == psConnected || entry.state == psRegistered {
req.result <- nil req.result <- nil
@ -354,8 +372,8 @@ func (pool *serverPool) eventLoop() {
entry.peer = req.p entry.peer = req.p
entry.state = psConnected entry.state = psConnected
addr := &poolEntryAddress{ addr := &poolEntryAddress{
ip: req.ip, ip: req.node.IP(),
port: req.port, port: uint16(req.node.TCP()),
lastSeen: mclock.Now(), lastSeen: mclock.Now(),
} }
entry.lastConnected = addr entry.lastConnected = addr
@ -404,18 +422,18 @@ func (pool *serverPool) eventLoop() {
} }
} }
func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16) *poolEntry { func (pool *serverPool) findOrNewNode(node *enode.Node) *poolEntry {
now := mclock.Now() now := mclock.Now()
entry := pool.entries[id] entry := pool.entries[node.ID()]
if entry == nil { if entry == nil {
log.Debug("Discovered new entry", "id", id) log.Debug("Discovered new entry", "id", node.ID())
entry = &poolEntry{ entry = &poolEntry{
id: id, node: node,
addr: make(map[string]*poolEntryAddress), addr: make(map[string]*poolEntryAddress),
addrSelect: *newWeightedRandomSelect(), addrSelect: *newWeightedRandomSelect(),
shortRetry: shortRetryCnt, shortRetry: shortRetryCnt,
} }
pool.entries[id] = entry pool.entries[node.ID()] = entry
// initialize previously unknown peers with good statistics to give a chance to prove themselves // initialize previously unknown peers with good statistics to give a chance to prove themselves
entry.connectStats.add(1, initStatsWeight) entry.connectStats.add(1, initStatsWeight)
entry.delayStats.add(0, initStatsWeight) entry.delayStats.add(0, initStatsWeight)
@ -423,10 +441,7 @@ func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16
entry.timeoutStats.add(0, initStatsWeight) entry.timeoutStats.add(0, initStatsWeight)
} }
entry.lastDiscovered = now entry.lastDiscovered = now
addr := &poolEntryAddress{ addr := &poolEntryAddress{ip: node.IP(), port: uint16(node.TCP())}
ip: ip,
port: port,
}
if a, ok := entry.addr[addr.strKey()]; ok { if a, ok := entry.addr[addr.strKey()]; ok {
addr = a addr = a
} else { } else {
@ -453,34 +468,38 @@ func (pool *serverPool) loadNodes() {
return return
} }
for _, e := range list { for _, e := range list {
log.Debug("Loaded server stats", "id", e.id, "fails", e.lastConnected.fails, log.Debug("Loaded server stats", "id", e.node.ID(), "fails", e.lastConnected.fails,
"conn", fmt.Sprintf("%v/%v", e.connectStats.avg, e.connectStats.weight), "conn", fmt.Sprintf("%v/%v", e.connectStats.avg, e.connectStats.weight),
"delay", fmt.Sprintf("%v/%v", time.Duration(e.delayStats.avg), e.delayStats.weight), "delay", fmt.Sprintf("%v/%v", time.Duration(e.delayStats.avg), e.delayStats.weight),
"response", fmt.Sprintf("%v/%v", time.Duration(e.responseStats.avg), e.responseStats.weight), "response", fmt.Sprintf("%v/%v", time.Duration(e.responseStats.avg), e.responseStats.weight),
"timeout", fmt.Sprintf("%v/%v", e.timeoutStats.avg, e.timeoutStats.weight)) "timeout", fmt.Sprintf("%v/%v", e.timeoutStats.avg, e.timeoutStats.weight))
pool.entries[e.id] = e pool.entries[e.node.ID()] = e
pool.knownQueue.setLatest(e) pool.knownQueue.setLatest(e)
pool.knownSelect.update((*knownEntry)(e)) pool.knownSelect.update((*knownEntry)(e))
} }
}
for _, trusted := range pool.parseTrustedServers() { func (pool *serverPool) connectToTrustedNodes() {
e := pool.findOrNewNode(trusted.ID, trusted.IP, trusted.TCP) //connect to trusted nodes
e.trusted = true if len(pool.trustedNodes) > 0 {
e.dialed = &poolEntryAddress{ip: trusted.IP, port: trusted.TCP} for _, trusted := range pool.parseTrustedServers() {
pool.entries[e.id] = e e := pool.findOrNewNode(trusted)
pool.trustedQueue.setLatest(e) e.trusted = true
e.dialed = &poolEntryAddress{ip: trusted.IP(), port: uint16(trusted.TCP())}
pool.entries[e.node.ID()] = e
pool.trustedQueue.setLatest(e)
}
} }
} }
// parseTrustedServers returns the configured trusted servers parsed into enodes. // parseTrustedServers returns the configured trusted servers parsed into enodes.
func (pool *serverPool) parseTrustedServers() []*discover.Node { func (pool *serverPool) parseTrustedServers() []*enode.Node {
nodes := make([]*discover.Node, 0, len(pool.trustedNodes)) nodes := make([]*enode.Node, 0, len(pool.trustedNodes))
for _, enode := range pool.trustedNodes { for _, node := range pool.trustedNodes {
node, err := discover.ParseNode(enode) node, err := enode.ParseV4(node)
if err != nil { if err != nil {
log.Warn("Trusted node URL invalid", "enode", enode, "err", err) log.Warn("Trusted node URL invalid", "enode", node, "err", err)
continue continue
} }
nodes = append(nodes, node) nodes = append(nodes, node)
@ -508,7 +527,7 @@ func (pool *serverPool) removeEntry(entry *poolEntry) {
pool.newSelect.remove((*discoveredEntry)(entry)) pool.newSelect.remove((*discoveredEntry)(entry))
pool.knownSelect.remove((*knownEntry)(entry)) pool.knownSelect.remove((*knownEntry)(entry))
entry.removed = true entry.removed = true
delete(pool.entries, entry.id) delete(pool.entries, entry.node.ID())
} }
// setRetryDial starts the timer which will enable dialing a certain node again // setRetryDial starts the timer which will enable dialing a certain node again
@ -599,10 +618,10 @@ func (pool *serverPool) dial(entry *poolEntry, knownSelected bool) {
if entry.trusted { if entry.trusted {
state = "trusted" state = "trusted"
} }
log.Debug("Dialing new peer", "lesaddr", entry.id.String()+"@"+entry.dialed.strKey(), "set", len(entry.addr), state, knownSelected) log.Debug("Dialing new peer", "lesaddr", entry.node.ID().String()+"@"+entry.dialed.strKey(), "set", len(entry.addr), state, knownSelected)
go func() { go func() {
pool.server.AddPeer(discover.NewNode(entry.id, entry.dialed.ip, entry.dialed.port, entry.dialed.port)) pool.server.AddPeer(entry.node)
select { select {
case <-pool.quit: case <-pool.quit:
case <-time.After(dialTimeout): case <-time.After(dialTimeout):
@ -620,7 +639,7 @@ func (pool *serverPool) checkDialTimeout(entry *poolEntry) {
if entry.state != psDialed { if entry.state != psDialed {
return return
} }
log.Debug("Dial timeout", "lesaddr", entry.id.String()+"@"+entry.dialed.strKey()) log.Debug("Dial timeout", "lesaddr", entry.node.ID().String()+"@"+entry.dialed.strKey())
entry.state = psNotConnected entry.state = psNotConnected
if entry.knownSelected { if entry.knownSelected {
pool.knownSelected-- pool.knownSelected--
@ -642,8 +661,9 @@ const (
// poolEntry represents a server node and stores its current state and statistics. // poolEntry represents a server node and stores its current state and statistics.
type poolEntry struct { type poolEntry struct {
peer *peer peer *peer
id discover.NodeID pubkey [64]byte // secp256k1 key of the node
addr map[string]*poolEntryAddress addr map[string]*poolEntryAddress
node *enode.Node
lastConnected, dialed *poolEntryAddress lastConnected, dialed *poolEntryAddress
addrSelect weightedRandomSelect addrSelect weightedRandomSelect
@ -661,23 +681,39 @@ type poolEntry struct {
shortRetry int shortRetry int
} }
// poolEntryEnc is the RLP encoding of poolEntry.
type poolEntryEnc struct {
Pubkey []byte
IP net.IP
Port uint16
Fails uint
CStat, DStat, RStat, TStat poolStats
}
func (e *poolEntry) EncodeRLP(w io.Writer) error { func (e *poolEntry) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{e.id, e.lastConnected.ip, e.lastConnected.port, e.lastConnected.fails, &e.connectStats, &e.delayStats, &e.responseStats, &e.timeoutStats}) return rlp.Encode(w, &poolEntryEnc{
Pubkey: encodePubkey64(e.node.Pubkey()),
IP: e.lastConnected.ip,
Port: e.lastConnected.port,
Fails: e.lastConnected.fails,
CStat: e.connectStats,
DStat: e.delayStats,
RStat: e.responseStats,
TStat: e.timeoutStats,
})
} }
func (e *poolEntry) DecodeRLP(s *rlp.Stream) error { func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
var entry struct { var entry poolEntryEnc
ID discover.NodeID
IP net.IP
Port uint16
Fails uint
CStat, DStat, RStat, TStat poolStats
}
if err := s.Decode(&entry); err != nil { if err := s.Decode(&entry); err != nil {
return err return err
} }
pubkey, err := decodePubkey64(entry.Pubkey)
if err != nil {
return err
}
addr := &poolEntryAddress{ip: entry.IP, port: entry.Port, fails: entry.Fails, lastSeen: mclock.Now()} addr := &poolEntryAddress{ip: entry.IP, port: entry.Port, fails: entry.Fails, lastSeen: mclock.Now()}
e.id = entry.ID e.node = enode.NewV4(pubkey, entry.IP, int(entry.Port), int(entry.Port))
e.addr = make(map[string]*poolEntryAddress) e.addr = make(map[string]*poolEntryAddress)
e.addr[addr.strKey()] = addr e.addr[addr.strKey()] = addr
e.addrSelect = *newWeightedRandomSelect() e.addrSelect = *newWeightedRandomSelect()
@ -692,6 +728,14 @@ func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
return nil return nil
} }
func encodePubkey64(pub *ecdsa.PublicKey) []byte {
return crypto.FromECDSAPub(pub)[1:]
}
func decodePubkey64(b []byte) (*ecdsa.PublicKey, error) {
return crypto.UnmarshalPubkey(append([]byte{0x04}, b...))
}
// discoveredEntry implements wrsItem // discoveredEntry implements wrsItem
type discoveredEntry poolEntry type discoveredEntry poolEntry
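The helpers above persist the server's secp256k1 key as 64 bytes: FromECDSAPub produces 65 bytes led by the 0x04 uncompressed-point prefix, which is stripped on encode and re-added on decode. A quick round-trip sketch, assuming go-ethereum's crypto package:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}

	full := crypto.FromECDSAPub(&key.PublicKey) // 65 bytes, first byte is 0x04
	stored := full[1:]                          // the 64 bytes written into the pool entry

	pub, err := crypto.UnmarshalPubkey(append([]byte{0x04}, stored...))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip ok:", pub.X.Cmp(key.PublicKey.X) == 0 && pub.Y.Cmp(key.PublicKey.Y) == 0)
}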

View File

@ -1,8 +1,10 @@
package les package les
import ( import (
"fmt"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
) )
type ulc struct { type ulc struct {
@ -17,17 +19,18 @@ func newULC(ulcConfig *eth.ULCConfig) *ulc {
m := make(map[string]struct{}, len(ulcConfig.TrustedServers)) m := make(map[string]struct{}, len(ulcConfig.TrustedServers))
for _, id := range ulcConfig.TrustedServers { for _, id := range ulcConfig.TrustedServers {
node, err := discover.ParseNode(id) node, err := enode.ParseV4(id)
if err != nil { if err != nil {
fmt.Println("node:", id, " err:", err)
continue continue
} }
m[node.ID.String()] = struct{}{} m[node.ID().String()] = struct{}{}
} }
return &ulc{m, ulcConfig.MinTrustedFraction} return &ulc{m, ulcConfig.MinTrustedFraction}
} }
func (u *ulc) isTrusted(p discover.NodeID) bool { func (u *ulc) isTrusted(p enode.ID) bool {
if u.trustedKeys == nil { if u.trustedKeys == nil {
return false return false
} }
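Trusted-server URLs are now parsed with enode.ParseV4 and keyed by the derived node ID. A hedged, self-contained sketch of that round trip, assuming go-ethereum's p2p/enode and crypto packages; a throwaway key is generated because a valid enode URL must embed a real public key, and rendering it back out via the node's String method is an assumption of the sketch:

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// Build a throwaway node so we have a syntactically and cryptographically
	// valid enode URL to parse.
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	url := enode.NewV4(&key.PublicKey, net.ParseIP("10.0.0.1"), 30303, 30303).String()

	trusted := make(map[string]struct{})
	node, err := enode.ParseV4(url)
	if err != nil {
		log.Fatal(err) // a malformed URL would simply be skipped in the real code
	}
	trusted[node.ID().String()] = struct{}{}

	fmt.Println("trusted IDs:", len(trusted))
}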

View File

@ -84,23 +84,23 @@ var (
} }
// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side. // TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
TestServerIndexerConfig = &IndexerConfig{ TestServerIndexerConfig = &IndexerConfig{
ChtSize: 256, ChtSize: 64,
PairChtSize: 2048, PairChtSize: 512,
ChtConfirms: 16, ChtConfirms: 4,
BloomSize: 256, BloomSize: 64,
BloomConfirms: 16, BloomConfirms: 4,
BloomTrieSize: 2048, BloomTrieSize: 512,
BloomTrieConfirms: 16, BloomTrieConfirms: 4,
} }
// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side. // TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
TestClientIndexerConfig = &IndexerConfig{ TestClientIndexerConfig = &IndexerConfig{
ChtSize: 2048, ChtSize: 512,
PairChtSize: 256, PairChtSize: 64,
ChtConfirms: 128, ChtConfirms: 32,
BloomSize: 2048, BloomSize: 512,
BloomConfirms: 128, BloomConfirms: 32,
BloomTrieSize: 2048, BloomTrieSize: 512,
BloomTrieConfirms: 128, BloomTrieConfirms: 32,
} }
) )

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,13 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine
package term
// IsTty always returns false on AppEngine.
func IsTty(fd uintptr) bool {
return false
}

View File

@ -1,13 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package term
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

View File

@ -1,18 +0,0 @@
package term
import (
"syscall"
)
const ioctlReadTermios = syscall.TIOCGETA
// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}

View File

@ -1,14 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package term
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios

View File

@ -1,7 +0,0 @@
package term
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

View File

@ -1,20 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,!appengine darwin freebsd openbsd netbsd
package term
import (
"syscall"
"unsafe"
)
// IsTty returns true if the given file descriptor is a terminal.
func IsTty(fd uintptr) bool {
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}

View File

@ -1,7 +0,0 @@
package term
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

View File

@ -1,9 +0,0 @@
package term
import "golang.org/x/sys/unix"
// IsTty returns true if the given file descriptor is a terminal.
func IsTty(fd uintptr) bool {
_, err := unix.IoctlGetTermios(int(fd), unix.TCGETA)
return err == nil
}

View File

@ -1,26 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package term
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTty returns true if the given file descriptor is a terminal.
func IsTty(fd uintptr) bool {
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}

View File

@ -1,6 +1,8 @@
package metrics package metrics
import "sync/atomic" import (
"sync/atomic"
)
// Counters hold an int64 value that can be incremented and decremented. // Counters hold an int64 value that can be incremented and decremented.
type Counter interface { type Counter interface {
@ -28,6 +30,12 @@ func NewCounter() Counter {
return &StandardCounter{0} return &StandardCounter{0}
} }
// NewCounterForced constructs a new StandardCounter and returns it no matter if
// the global switch is enabled or not.
func NewCounterForced() Counter {
return &StandardCounter{0}
}
// NewRegisteredCounter constructs and registers a new StandardCounter. // NewRegisteredCounter constructs and registers a new StandardCounter.
func NewRegisteredCounter(name string, r Registry) Counter { func NewRegisteredCounter(name string, r Registry) Counter {
c := NewCounter() c := NewCounter()
@ -38,6 +46,19 @@ func NewRegisteredCounter(name string, r Registry) Counter {
return c return c
} }
// NewRegisteredCounterForced constructs and registers a new StandardCounter
// and launches a goroutine no matter the global switch is enabled or not.
// Be sure to unregister the counter from the registry once it is of no use to
// allow for garbage collection.
func NewRegisteredCounterForced(name string, r Registry) Counter {
c := NewCounterForced()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// CounterSnapshot is a read-only copy of another Counter. // CounterSnapshot is a read-only copy of another Counter.
type CounterSnapshot int64 type CounterSnapshot int64
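NewRegisteredCounterForced yields a counter that collects even when metrics are globally disabled, so callers are expected to unregister it when it is no longer needed. A brief usage sketch, assuming go-ethereum's metrics package (the metric name is made up):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// Registered regardless of the global metrics switch.
	c := metrics.NewRegisteredCounterForced("example/served/blocks", metrics.DefaultRegistry) // hypothetical name
	c.Inc(1)
	c.Inc(2)
	fmt.Println("count:", c.Count())

	// Unregister once the counter is of no further use so it can be collected.
	metrics.DefaultRegistry.Unregister("example/served/blocks")
}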

View File

@ -43,7 +43,7 @@ type unconfirmedBlock struct {
} }
// unconfirmedBlocks implements a data structure to maintain locally mined blocks // unconfirmedBlocks implements a data structure to maintain locally mined blocks
// have have not yet reached enough maturity to guarantee chain inclusion. It is // have not yet reached enough maturity to guarantee chain inclusion. It is
// used by the miner to provide logs to the user when a previously mined block // used by the miner to provide logs to the user when a previously mined block
// has a high enough guarantee to not be reorged out of the canonical chain. // has a high enough guarantee to not be reorged out of the canonical chain.
type unconfirmedBlocks struct { type unconfirmedBlocks struct {

View File

@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
@ -51,7 +51,7 @@ func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) {
return false, ErrNodeStopped return false, ErrNodeStopped
} }
// Try to add the url as a static peer and return // Try to add the url as a static peer and return
node, err := discover.ParseNode(url) node, err := enode.ParseV4(url)
if err != nil { if err != nil {
return false, fmt.Errorf("invalid enode: %v", err) return false, fmt.Errorf("invalid enode: %v", err)
} }
@ -67,7 +67,7 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
return false, ErrNodeStopped return false, ErrNodeStopped
} }
// Try to remove the url as a static peer and return // Try to remove the url as a static peer and return
node, err := discover.ParseNode(url) node, err := enode.ParseV4(url)
if err != nil { if err != nil {
return false, fmt.Errorf("invalid enode: %v", err) return false, fmt.Errorf("invalid enode: %v", err)
} }
@ -82,7 +82,7 @@ func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) {
if server == nil { if server == nil {
return false, ErrNodeStopped return false, ErrNodeStopped
} }
node, err := discover.ParseNode(url) node, err := enode.ParseV4(url)
if err != nil { if err != nil {
return false, fmt.Errorf("invalid enode: %v", err) return false, fmt.Errorf("invalid enode: %v", err)
} }
@ -98,7 +98,7 @@ func (api *PrivateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {
if server == nil { if server == nil {
return false, ErrNodeStopped return false, ErrNodeStopped
} }
node, err := discover.ParseNode(url) node, err := enode.ParseV4(url)
if err != nil { if err != nil {
return false, fmt.Errorf("invalid enode: %v", err) return false, fmt.Errorf("invalid enode: %v", err)
} }
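The admin API now goes through enode.ParseV4 instead of discover.ParseNode, and node fields are read via accessor methods. A small round-trip sketch (the address and ports are arbitrary examples):

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// Build a v4 node record from a freshly generated key, render it as an
	// enode:// URL and parse it back with enode.ParseV4.
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	n := enode.NewV4(&key.PublicKey, net.ParseIP("10.3.58.6"), 30303, 30301)

	parsed, err := enode.ParseV4(n.String())
	if err != nil {
		log.Fatalf("invalid enode: %v", err)
	}
	// Fields are now exposed through accessors instead of struct fields.
	fmt.Println(parsed.ID(), parsed.IP(), parsed.TCP(), parsed.UDP())
}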


@ -32,7 +32,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
@ -336,18 +336,18 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
} }
// StaticNodes returns a list of node enode URLs configured as static nodes. // StaticNodes returns a list of node enode URLs configured as static nodes.
func (c *Config) StaticNodes() []*discover.Node { func (c *Config) StaticNodes() []*enode.Node {
return c.parsePersistentNodes(c.ResolvePath(datadirStaticNodes)) return c.parsePersistentNodes(c.ResolvePath(datadirStaticNodes))
} }
// TrustedNodes returns a list of node enode URLs configured as trusted nodes. // TrustedNodes returns a list of node enode URLs configured as trusted nodes.
func (c *Config) TrustedNodes() []*discover.Node { func (c *Config) TrustedNodes() []*enode.Node {
return c.parsePersistentNodes(c.ResolvePath(datadirTrustedNodes)) return c.parsePersistentNodes(c.ResolvePath(datadirTrustedNodes))
} }
// parsePersistentNodes parses a list of discovery node URLs loaded from a .json // parsePersistentNodes parses a list of discovery node URLs loaded from a .json
// file from within the data directory. // file from within the data directory.
func (c *Config) parsePersistentNodes(path string) []*discover.Node { func (c *Config) parsePersistentNodes(path string) []*enode.Node {
// Short circuit if no node config is present // Short circuit if no node config is present
if c.DataDir == "" { if c.DataDir == "" {
return nil return nil
@ -362,12 +362,12 @@ func (c *Config) parsePersistentNodes(path string) []*discover.Node {
return nil return nil
} }
// Interpret the list as a discovery node array // Interpret the list as a discovery node array
var nodes []*discover.Node var nodes []*enode.Node
for _, url := range nodelist { for _, url := range nodelist {
if url == "" { if url == "" {
continue continue
} }
node, err := discover.ParseNode(url) node, err := enode.ParseV4(url)
if err != nil { if err != nil {
log.Error(fmt.Sprintf("Node URL %s: %v\n", url, err)) log.Error(fmt.Sprintf("Node URL %s: %v\n", url, err))
continue continue
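parsePersistentNodes keeps the same behaviour but now yields []*enode.Node. A rough standalone equivalent, assuming the file holds a plain JSON array of enode URLs (the file name and helper name are just examples):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// loadNodes reads a JSON array of enode URLs and skips entries that fail to
// parse, mirroring the error handling in parsePersistentNodes above.
func loadNodes(path string) ([]*enode.Node, error) {
	blob, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var urls []string
	if err := json.Unmarshal(blob, &urls); err != nil {
		return nil, err
	}
	var nodes []*enode.Node
	for _, url := range urls {
		if url == "" {
			continue
		}
		n, err := enode.ParseV4(url)
		if err != nil {
			fmt.Fprintf(os.Stderr, "node URL %s: %v\n", url, err)
			continue
		}
		nodes = append(nodes, n)
	}
	return nodes, nil
}

func main() {
	nodes, err := loadNodes("static-nodes.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("loaded", len(nodes), "static nodes")
}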


@ -18,14 +18,13 @@ package p2p
import ( import (
"container/heap" "container/heap"
"crypto/rand"
"errors" "errors"
"fmt" "fmt"
"net" "net"
"time" "time"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
) )
@ -50,7 +49,7 @@ const (
// NodeDialer is used to connect to nodes in the network, typically by using // NodeDialer is used to connect to nodes in the network, typically by using
// an underlying net.Dialer but also using net.Pipe in tests // an underlying net.Dialer but also using net.Pipe in tests
type NodeDialer interface { type NodeDialer interface {
Dial(*discover.Node) (net.Conn, error) Dial(*enode.Node) (net.Conn, error)
} }
// TCPDialer implements the NodeDialer interface by using a net.Dialer to // TCPDialer implements the NodeDialer interface by using a net.Dialer to
@ -60,8 +59,8 @@ type TCPDialer struct {
} }
// Dial creates a TCP connection to the node // Dial creates a TCP connection to the node
func (t TCPDialer) Dial(dest *discover.Node) (net.Conn, error) { func (t TCPDialer) Dial(dest *enode.Node) (net.Conn, error) {
addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)} addr := &net.TCPAddr{IP: dest.IP(), Port: dest.TCP()}
return t.Dialer.Dial("tcp", addr.String()) return t.Dialer.Dial("tcp", addr.String())
} }
@ -74,22 +73,22 @@ type dialstate struct {
netrestrict *netutil.Netlist netrestrict *netutil.Netlist
lookupRunning bool lookupRunning bool
dialing map[discover.NodeID]connFlag dialing map[enode.ID]connFlag
lookupBuf []*discover.Node // current discovery lookup results lookupBuf []*enode.Node // current discovery lookup results
randomNodes []*discover.Node // filled from Table randomNodes []*enode.Node // filled from Table
static map[discover.NodeID]*dialTask static map[enode.ID]*dialTask
hist *dialHistory hist *dialHistory
start time.Time // time when the dialer was first used start time.Time // time when the dialer was first used
bootnodes []*discover.Node // default dials when there are no peers bootnodes []*enode.Node // default dials when there are no peers
} }
type discoverTable interface { type discoverTable interface {
Self() *discover.Node Self() *enode.Node
Close() Close()
Resolve(target discover.NodeID) *discover.Node Resolve(*enode.Node) *enode.Node
Lookup(target discover.NodeID) []*discover.Node LookupRandom() []*enode.Node
ReadRandomNodes([]*discover.Node) int ReadRandomNodes([]*enode.Node) int
} }
// the dial history remembers recent dials. // the dial history remembers recent dials.
@ -97,7 +96,7 @@ type dialHistory []pastDial
// pastDial is an entry in the dial history. // pastDial is an entry in the dial history.
type pastDial struct { type pastDial struct {
id discover.NodeID id enode.ID
exp time.Time exp time.Time
} }
@ -109,7 +108,7 @@ type task interface {
// fields cannot be accessed while the task is running. // fields cannot be accessed while the task is running.
type dialTask struct { type dialTask struct {
flags connFlag flags connFlag
dest *discover.Node dest *enode.Node
lastResolved time.Time lastResolved time.Time
resolveDelay time.Duration resolveDelay time.Duration
} }
@ -118,7 +117,7 @@ type dialTask struct {
// Only one discoverTask is active at any time. // Only one discoverTask is active at any time.
// discoverTask.Do performs a random lookup. // discoverTask.Do performs a random lookup.
type discoverTask struct { type discoverTask struct {
results []*discover.Node results []*enode.Node
} }
// A waitExpireTask is generated if there are no other tasks // A waitExpireTask is generated if there are no other tasks
@ -127,15 +126,15 @@ type waitExpireTask struct {
time.Duration time.Duration
} }
func newDialState(static []*discover.Node, bootnodes []*discover.Node, ntab discoverTable, maxdyn int, netrestrict *netutil.Netlist) *dialstate { func newDialState(static []*enode.Node, bootnodes []*enode.Node, ntab discoverTable, maxdyn int, netrestrict *netutil.Netlist) *dialstate {
s := &dialstate{ s := &dialstate{
maxDynDials: maxdyn, maxDynDials: maxdyn,
ntab: ntab, ntab: ntab,
netrestrict: netrestrict, netrestrict: netrestrict,
static: make(map[discover.NodeID]*dialTask), static: make(map[enode.ID]*dialTask),
dialing: make(map[discover.NodeID]connFlag), dialing: make(map[enode.ID]connFlag),
bootnodes: make([]*discover.Node, len(bootnodes)), bootnodes: make([]*enode.Node, len(bootnodes)),
randomNodes: make([]*discover.Node, maxdyn/2), randomNodes: make([]*enode.Node, maxdyn/2),
hist: new(dialHistory), hist: new(dialHistory),
} }
copy(s.bootnodes, bootnodes) copy(s.bootnodes, bootnodes)
@ -145,32 +144,32 @@ func newDialState(static []*discover.Node, bootnodes []*discover.Node, ntab disc
return s return s
} }
func (s *dialstate) addStatic(n *discover.Node) { func (s *dialstate) addStatic(n *enode.Node) {
// This overwrites the task instead of updating an existing // This overwrites the task instead of updating an existing
// entry, giving users the opportunity to force a resolve operation. // entry, giving users the opportunity to force a resolve operation.
s.static[n.ID] = &dialTask{flags: staticDialedConn, dest: n} s.static[n.ID()] = &dialTask{flags: staticDialedConn, dest: n}
} }
func (s *dialstate) removeStatic(n *discover.Node) { func (s *dialstate) removeStatic(n *enode.Node) {
// This removes a task so future attempts to connect will not be made. // This removes a task so future attempts to connect will not be made.
delete(s.static, n.ID) delete(s.static, n.ID())
// This removes a previous dial timestamp so that application // This removes a previous dial timestamp so that application
// can force a server to reconnect with chosen peer immediately. // can force a server to reconnect with chosen peer immediately.
s.hist.remove(n.ID) s.hist.remove(n.ID())
} }
func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now time.Time) []task { func (s *dialstate) newTasks(nRunning int, peers map[enode.ID]*Peer, now time.Time) []task {
if s.start.IsZero() { if s.start.IsZero() {
s.start = now s.start = now
} }
var newtasks []task var newtasks []task
addDial := func(flag connFlag, n *discover.Node) bool { addDial := func(flag connFlag, n *enode.Node) bool {
if err := s.checkDial(n, peers); err != nil { if err := s.checkDial(n, peers); err != nil {
log.Trace("Skipping dial candidate", "id", n.ID, "addr", &net.TCPAddr{IP: n.IP, Port: int(n.TCP)}, "err", err) log.Trace("Skipping dial candidate", "id", n.ID(), "addr", &net.TCPAddr{IP: n.IP(), Port: n.TCP()}, "err", err)
return false return false
} }
s.dialing[n.ID] = flag s.dialing[n.ID()] = flag
newtasks = append(newtasks, &dialTask{flags: flag, dest: n}) newtasks = append(newtasks, &dialTask{flags: flag, dest: n})
return true return true
} }
@ -196,8 +195,8 @@ func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now
err := s.checkDial(t.dest, peers) err := s.checkDial(t.dest, peers)
switch err { switch err {
case errNotWhitelisted, errSelf: case errNotWhitelisted, errSelf:
log.Warn("Removing static dial candidate", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP, Port: int(t.dest.TCP)}, "err", err) log.Warn("Removing static dial candidate", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()}, "err", err)
delete(s.static, t.dest.ID) delete(s.static, t.dest.ID())
case nil: case nil:
s.dialing[id] = t.flags s.dialing[id] = t.flags
newtasks = append(newtasks, t) newtasks = append(newtasks, t)
@ -260,18 +259,18 @@ var (
errNotWhitelisted = errors.New("not contained in netrestrict whitelist") errNotWhitelisted = errors.New("not contained in netrestrict whitelist")
) )
func (s *dialstate) checkDial(n *discover.Node, peers map[discover.NodeID]*Peer) error { func (s *dialstate) checkDial(n *enode.Node, peers map[enode.ID]*Peer) error {
_, dialing := s.dialing[n.ID] _, dialing := s.dialing[n.ID()]
switch { switch {
case dialing: case dialing:
return errAlreadyDialing return errAlreadyDialing
case peers[n.ID] != nil: case peers[n.ID()] != nil:
return errAlreadyConnected return errAlreadyConnected
case s.ntab != nil && n.ID == s.ntab.Self().ID: case s.ntab != nil && n.ID() == s.ntab.Self().ID():
return errSelf return errSelf
case s.netrestrict != nil && !s.netrestrict.Contains(n.IP): case s.netrestrict != nil && !s.netrestrict.Contains(n.IP()):
return errNotWhitelisted return errNotWhitelisted
case s.hist.contains(n.ID): case s.hist.contains(n.ID()):
return errRecentlyDialed return errRecentlyDialed
} }
return nil return nil
@ -280,8 +279,8 @@ func (s *dialstate) checkDial(n *discover.Node, peers map[discover.NodeID]*Peer)
func (s *dialstate) taskDone(t task, now time.Time) { func (s *dialstate) taskDone(t task, now time.Time) {
switch t := t.(type) { switch t := t.(type) {
case *dialTask: case *dialTask:
s.hist.add(t.dest.ID, now.Add(dialHistoryExpiration)) s.hist.add(t.dest.ID(), now.Add(dialHistoryExpiration))
delete(s.dialing, t.dest.ID) delete(s.dialing, t.dest.ID())
case *discoverTask: case *discoverTask:
s.lookupRunning = false s.lookupRunning = false
s.lookupBuf = append(s.lookupBuf, t.results...) s.lookupBuf = append(s.lookupBuf, t.results...)
@ -323,7 +322,7 @@ func (t *dialTask) resolve(srv *Server) bool {
if time.Since(t.lastResolved) < t.resolveDelay { if time.Since(t.lastResolved) < t.resolveDelay {
return false return false
} }
resolved := srv.ntab.Resolve(t.dest.ID) resolved := srv.ntab.Resolve(t.dest)
t.lastResolved = time.Now() t.lastResolved = time.Now()
if resolved == nil { if resolved == nil {
t.resolveDelay *= 2 t.resolveDelay *= 2
@ -336,7 +335,7 @@ func (t *dialTask) resolve(srv *Server) bool {
// The node was found. // The node was found.
t.resolveDelay = initialResolveDelay t.resolveDelay = initialResolveDelay
t.dest = resolved t.dest = resolved
log.Debug("Resolved node", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP, Port: int(t.dest.TCP)}) log.Debug("Resolved node", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()})
return true return true
} }
@ -345,7 +344,7 @@ type dialError struct {
} }
// dial performs the actual connection attempt. // dial performs the actual connection attempt.
func (t *dialTask) dial(srv *Server, dest *discover.Node) error { func (t *dialTask) dial(srv *Server, dest *enode.Node) error {
fd, err := srv.Dialer.Dial(dest) fd, err := srv.Dialer.Dial(dest)
if err != nil { if err != nil {
return &dialError{err} return &dialError{err}
@ -355,7 +354,8 @@ func (t *dialTask) dial(srv *Server, dest *discover.Node) error {
} }
func (t *dialTask) String() string { func (t *dialTask) String() string {
return fmt.Sprintf("%v %x %v:%d", t.flags, t.dest.ID[:8], t.dest.IP, t.dest.TCP) id := t.dest.ID()
return fmt.Sprintf("%v %x %v:%d", t.flags, id[:8], t.dest.IP(), t.dest.TCP())
} }
func (t *discoverTask) Do(srv *Server) { func (t *discoverTask) Do(srv *Server) {
@ -367,9 +367,7 @@ func (t *discoverTask) Do(srv *Server) {
time.Sleep(next.Sub(now)) time.Sleep(next.Sub(now))
} }
srv.lastLookup = time.Now() srv.lastLookup = time.Now()
var target discover.NodeID t.results = srv.ntab.LookupRandom()
rand.Read(target[:])
t.results = srv.ntab.Lookup(target)
} }
func (t *discoverTask) String() string { func (t *discoverTask) String() string {
@ -391,11 +389,11 @@ func (t waitExpireTask) String() string {
func (h dialHistory) min() pastDial { func (h dialHistory) min() pastDial {
return h[0] return h[0]
} }
func (h *dialHistory) add(id discover.NodeID, exp time.Time) { func (h *dialHistory) add(id enode.ID, exp time.Time) {
heap.Push(h, pastDial{id, exp}) heap.Push(h, pastDial{id, exp})
} }
func (h *dialHistory) remove(id discover.NodeID) bool { func (h *dialHistory) remove(id enode.ID) bool {
for i, v := range *h { for i, v := range *h {
if v.id == id { if v.id == id {
heap.Remove(h, i) heap.Remove(h, i)
@ -404,7 +402,7 @@ func (h *dialHistory) remove(id discover.NodeID) bool {
} }
return false return false
} }
func (h dialHistory) contains(id discover.NodeID) bool { func (h dialHistory) contains(id enode.ID) bool {
for _, v := range h { for _, v := range h {
if v.id == id { if v.id == id {
return true return true
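Dial bookkeeping is now keyed by enode.ID, and node data is read through accessors. A toy sketch of the same pattern outside package p2p (the tracker type and startDial method are invented for illustration):

package main

import (
	"fmt"
	"log"
	"net"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// tracker keeps in-flight dials keyed by enode.ID, the same bookkeeping
// pattern dialstate uses above.
type tracker struct {
	dialing map[enode.ID]time.Time
}

// startDial records the attempt and returns the TCP address built from the
// accessor methods, as TCPDialer.Dial does.
func (t *tracker) startDial(n *enode.Node, now time.Time) *net.TCPAddr {
	t.dialing[n.ID()] = now
	return &net.TCPAddr{IP: n.IP(), Port: n.TCP()}
}

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	n := enode.NewV4(&key.PublicKey, net.ParseIP("192.0.2.1"), 30303, 30303)

	tr := &tracker{dialing: make(map[enode.ID]time.Time)}
	fmt.Println("dialing", tr.startDial(n, time.Now()))
}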


@ -18,415 +18,87 @@ package discover
import ( import (
"crypto/ecdsa" "crypto/ecdsa"
"crypto/elliptic"
"encoding/hex"
"errors" "errors"
"fmt"
"math/big" "math/big"
"math/rand"
"net" "net"
"net/url"
"regexp"
"strconv"
"strings"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/secp256k1" "github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/p2p/enode"
) )
const NodeIDBits = 512 // node represents a host on the network.
// Node represents a host on the network.
// The fields of Node may not be modified. // The fields of Node may not be modified.
type Node struct { type node struct {
IP net.IP // len 4 for IPv4 or 16 for IPv6 enode.Node
UDP, TCP uint16 // port numbers addedAt time.Time // time when the node was added to the table
ID NodeID // the node's public key
// This is a cached copy of sha3(ID) which is used for node
// distance calculations. This is part of Node in order to make it
// possible to write tests that need a node at a certain distance.
// In those tests, the content of sha will not actually correspond
// with ID.
sha common.Hash
// Time when the node was added to the table.
addedAt time.Time
} }
// NewNode creates a new node. It is mostly meant to be used for type encPubkey [64]byte
// testing purposes.
func NewNode(id NodeID, ip net.IP, udpPort, tcpPort uint16) *Node { func encodePubkey(key *ecdsa.PublicKey) encPubkey {
if ipv4 := ip.To4(); ipv4 != nil { var e encPubkey
ip = ipv4 math.ReadBits(key.X, e[:len(e)/2])
} math.ReadBits(key.Y, e[len(e)/2:])
return &Node{ return e
IP: ip,
UDP: udpPort,
TCP: tcpPort,
ID: id,
sha: crypto.Keccak256Hash(id[:]),
}
} }
func (n *Node) addr() *net.UDPAddr { func decodePubkey(e encPubkey) (*ecdsa.PublicKey, error) {
return &net.UDPAddr{IP: n.IP, Port: int(n.UDP)}
}
// Incomplete returns true for nodes with no IP address.
func (n *Node) Incomplete() bool {
return n.IP == nil
}
// checks whether n is a valid complete node.
func (n *Node) validateComplete() error {
if n.Incomplete() {
return errors.New("incomplete node")
}
if n.UDP == 0 {
return errors.New("missing UDP port")
}
if n.TCP == 0 {
return errors.New("missing TCP port")
}
if n.IP.IsMulticast() || n.IP.IsUnspecified() {
return errors.New("invalid IP (multicast/unspecified)")
}
_, err := n.ID.Pubkey() // validate the key (on curve, etc.)
return err
}
// The string representation of a Node is a URL.
// Please see ParseNode for a description of the format.
func (n *Node) String() string {
u := url.URL{Scheme: "enode"}
if n.Incomplete() {
u.Host = fmt.Sprintf("%x", n.ID[:])
} else {
addr := net.TCPAddr{IP: n.IP, Port: int(n.TCP)}
u.User = url.User(fmt.Sprintf("%x", n.ID[:]))
u.Host = addr.String()
if n.UDP != n.TCP {
u.RawQuery = "discport=" + strconv.Itoa(int(n.UDP))
}
}
return u.String()
}
var incompleteNodeURL = regexp.MustCompile("(?i)^(?:enode://)?([0-9a-f]+)$")
// ParseNode parses a node designator.
//
// There are two basic forms of node designators
// - incomplete nodes, which only have the public key (node ID)
// - complete nodes, which contain the public key and IP/Port information
//
// For incomplete nodes, the designator must look like one of these
//
// enode://<hex node id>
// <hex node id>
//
// For complete nodes, the node ID is encoded in the username portion
// of the URL, separated from the host by an @ sign. The hostname can
// only be given as an IP address, DNS domain names are not allowed.
// The port in the host name section is the TCP listening port. If the
// TCP and UDP (discovery) ports differ, the UDP port is specified as
// query parameter "discport".
//
// In the following example, the node URL describes
// a node with IP address 10.3.58.6, TCP listening port 30303
// and UDP discovery port 30301.
//
// enode://<hex node id>@10.3.58.6:30303?discport=30301
func ParseNode(rawurl string) (*Node, error) {
if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil {
id, err := HexID(m[1])
if err != nil {
return nil, fmt.Errorf("invalid node ID (%v)", err)
}
return NewNode(id, nil, 0, 0), nil
}
return parseComplete(rawurl)
}
func parseComplete(rawurl string) (*Node, error) {
var (
id NodeID
ip net.IP
tcpPort, udpPort uint64
)
u, err := url.Parse(rawurl)
if err != nil {
return nil, err
}
if u.Scheme != "enode" {
return nil, errors.New("invalid URL scheme, want \"enode\"")
}
// Parse the Node ID from the user portion.
if u.User == nil {
return nil, errors.New("does not contain node ID")
}
if id, err = HexID(u.User.String()); err != nil {
return nil, fmt.Errorf("invalid node ID (%v)", err)
}
// Parse the IP address.
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return nil, fmt.Errorf("invalid host: %v", err)
}
if ip = net.ParseIP(host); ip == nil {
return nil, errors.New("invalid IP address")
}
// Ensure the IP is 4 bytes long for IPv4 addresses.
if ipv4 := ip.To4(); ipv4 != nil {
ip = ipv4
}
// Parse the port numbers.
if tcpPort, err = strconv.ParseUint(port, 10, 16); err != nil {
return nil, errors.New("invalid port")
}
udpPort = tcpPort
qv := u.Query()
if qv.Get("discport") != "" {
udpPort, err = strconv.ParseUint(qv.Get("discport"), 10, 16)
if err != nil {
return nil, errors.New("invalid discport in query")
}
}
return NewNode(id, ip, uint16(udpPort), uint16(tcpPort)), nil
}
// MustParseNode parses a node URL. It panics if the URL is not valid.
func MustParseNode(rawurl string) *Node {
n, err := ParseNode(rawurl)
if err != nil {
panic("invalid node URL: " + err.Error())
}
return n
}
// MarshalText implements encoding.TextMarshaler.
func (n *Node) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (n *Node) UnmarshalText(text []byte) error {
dec, err := ParseNode(string(text))
if err == nil {
*n = *dec
}
return err
}
// NodeID is a unique identifier for each node.
// The node identifier is a marshaled elliptic curve public key.
type NodeID [NodeIDBits / 8]byte
// Bytes returns a byte slice representation of the NodeID
func (n NodeID) Bytes() []byte {
return n[:]
}
// NodeID prints as a long hexadecimal number.
func (n NodeID) String() string {
return fmt.Sprintf("%x", n[:])
}
// The Go syntax representation of a NodeID is a call to HexID.
func (n NodeID) GoString() string {
return fmt.Sprintf("discover.HexID(\"%x\")", n[:])
}
// TerminalString returns a shortened hex string for terminal logging.
func (n NodeID) TerminalString() string {
return hex.EncodeToString(n[:8])
}
// MarshalText implements the encoding.TextMarshaler interface.
func (n NodeID) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(n[:])), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (n *NodeID) UnmarshalText(text []byte) error {
id, err := HexID(string(text))
if err != nil {
return err
}
*n = id
return nil
}
// BytesID converts a byte slice to a NodeID
func BytesID(b []byte) (NodeID, error) {
var id NodeID
if len(b) != len(id) {
return id, fmt.Errorf("wrong length, want %d bytes", len(id))
}
copy(id[:], b)
return id, nil
}
// MustBytesID converts a byte slice to a NodeID.
// It panics if the byte slice is not a valid NodeID.
func MustBytesID(b []byte) NodeID {
id, err := BytesID(b)
if err != nil {
panic(err)
}
return id
}
// HexID converts a hex string to a NodeID.
// The string may be prefixed with 0x.
func HexID(in string) (NodeID, error) {
var id NodeID
b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
if err != nil {
return id, err
} else if len(b) != len(id) {
return id, fmt.Errorf("wrong length, want %d hex chars", len(id)*2)
}
copy(id[:], b)
return id, nil
}
// MustHexID converts a hex string to a NodeID.
// It panics if the string is not a valid NodeID.
func MustHexID(in string) NodeID {
id, err := HexID(in)
if err != nil {
panic(err)
}
return id
}
// PubkeyID returns a marshaled representation of the given public key.
func PubkeyID(pub *ecdsa.PublicKey) NodeID {
var id NodeID
pbytes := elliptic.Marshal(pub.Curve, pub.X, pub.Y)
if len(pbytes)-1 != len(id) {
panic(fmt.Errorf("need %d bit pubkey, got %d bits", (len(id)+1)*8, len(pbytes)))
}
copy(id[:], pbytes[1:])
return id
}
// Pubkey returns the public key represented by the node ID.
// It returns an error if the ID is not a point on the curve.
func (id NodeID) Pubkey() (*ecdsa.PublicKey, error) {
p := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)} p := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)}
half := len(id) / 2 half := len(e) / 2
p.X.SetBytes(id[:half]) p.X.SetBytes(e[:half])
p.Y.SetBytes(id[half:]) p.Y.SetBytes(e[half:])
if !p.Curve.IsOnCurve(p.X, p.Y) { if !p.Curve.IsOnCurve(p.X, p.Y) {
return nil, errors.New("id is invalid secp256k1 curve point") return nil, errors.New("invalid secp256k1 curve point")
} }
return p, nil return p, nil
} }
// recoverNodeID computes the public key used to sign the func (e encPubkey) id() enode.ID {
return enode.ID(crypto.Keccak256Hash(e[:]))
}
// recoverNodeKey computes the public key used to sign the
// given hash from the signature. // given hash from the signature.
func recoverNodeID(hash, sig []byte) (id NodeID, err error) { func recoverNodeKey(hash, sig []byte) (key encPubkey, err error) {
pubkey, err := secp256k1.RecoverPubkey(hash, sig) pubkey, err := secp256k1.RecoverPubkey(hash, sig)
if err != nil { if err != nil {
return id, err return key, err
} }
if len(pubkey)-1 != len(id) { copy(key[:], pubkey[1:])
return id, fmt.Errorf("recovered pubkey has %d bits, want %d bits", len(pubkey)*8, (len(id)+1)*8) return key, nil
}
for i := range id {
id[i] = pubkey[i+1]
}
return id, nil
} }
// distcmp compares the distances a->target and b->target. func wrapNode(n *enode.Node) *node {
// Returns -1 if a is closer to target, 1 if b is closer to target return &node{Node: *n}
// and 0 if they are equal.
func distcmp(target, a, b common.Hash) int {
for i := range target {
da := a[i] ^ target[i]
db := b[i] ^ target[i]
if da > db {
return 1
} else if da < db {
return -1
}
}
return 0
} }
// table of leading zero counts for bytes [0..255] func wrapNodes(ns []*enode.Node) []*node {
var lzcount = [256]int{ result := make([]*node, len(ns))
8, 7, 6, 6, 5, 5, 5, 5, for i, n := range ns {
4, 4, 4, 4, 4, 4, 4, 4, result[i] = wrapNode(n)
3, 3, 3, 3, 3, 3, 3, 3, }
3, 3, 3, 3, 3, 3, 3, 3, return result
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
} }
// logdist returns the logarithmic distance between a and b, log2(a ^ b). func unwrapNode(n *node) *enode.Node {
func logdist(a, b common.Hash) int { return &n.Node
lz := 0
for i := range a {
x := a[i] ^ b[i]
if x == 0 {
lz += 8
} else {
lz += lzcount[x]
break
}
}
return len(a)*8 - lz
} }
// hashAtDistance returns a random hash such that logdist(a, b) == n func unwrapNodes(ns []*node) []*enode.Node {
func hashAtDistance(a common.Hash, n int) (b common.Hash) { result := make([]*enode.Node, len(ns))
if n == 0 { for i, n := range ns {
return a result[i] = unwrapNode(n)
} }
// flip bit at position n, fill the rest with random bits return result
b = a }
pos := len(a) - n/8 - 1
bit := byte(0x01) << (byte(n%8) - 1) func (n *node) addr() *net.UDPAddr {
if bit == 0 { return &net.UDPAddr{IP: n.IP(), Port: n.UDP()}
pos++ }
bit = 0x80
} func (n *node) String() string {
b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits return n.Node.String()
for i := pos + 1; i < len(a); i++ {
b[i] = byte(rand.Intn(255))
}
return b
} }
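The old exported NodeID is replaced by the unexported encPubkey, which only exists on the wire; node identity is now the keccak256 hash of the encoded public key. A sketch reproducing that derivation outside the package, using only exported helpers that appear in the diff above:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	// Pack the secp256k1 X/Y coordinates into 64 bytes, as encodePubkey does,
	// then hash them to obtain the enode.ID, as encPubkey.id does.
	var packed [64]byte
	math.ReadBits(key.PublicKey.X, packed[:32])
	math.ReadBits(key.PublicKey.Y, packed[32:])

	id := enode.ID(crypto.Keccak256Hash(packed[:]))
	fmt.Println("derived node ID:", id)
}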


@ -23,6 +23,7 @@
package discover package discover
import ( import (
"crypto/ecdsa"
crand "crypto/rand" crand "crypto/rand"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
@ -35,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
) )
@ -65,49 +67,44 @@ const (
type Table struct { type Table struct {
mutex sync.Mutex // protects buckets, bucket content, nursery, rand mutex sync.Mutex // protects buckets, bucket content, nursery, rand
buckets [nBuckets]*bucket // index of known nodes by distance buckets [nBuckets]*bucket // index of known nodes by distance
nursery []*Node // bootstrap nodes nursery []*node // bootstrap nodes
rand *mrand.Rand // source of randomness, periodically reseeded rand *mrand.Rand // source of randomness, periodically reseeded
ips netutil.DistinctNetSet ips netutil.DistinctNetSet
db *nodeDB // database of known nodes db *enode.DB // database of known nodes
refreshReq chan chan struct{} refreshReq chan chan struct{}
initDone chan struct{} initDone chan struct{}
closeReq chan struct{} closeReq chan struct{}
closed chan struct{} closed chan struct{}
nodeAddedHook func(*Node) // for testing nodeAddedHook func(*node) // for testing
net transport net transport
self *Node // metadata of the local node self *node // metadata of the local node
} }
// transport is implemented by the UDP transport. // transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP // it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key. // sockets and without generating a private key.
type transport interface { type transport interface {
ping(NodeID, *net.UDPAddr) error ping(enode.ID, *net.UDPAddr) error
findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error) findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error)
close() close()
} }
// bucket contains nodes, ordered by their last activity. the entry // bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries. // that was most recently active is the first element in entries.
type bucket struct { type bucket struct {
entries []*Node // live entries, sorted by time of last contact entries []*node // live entries, sorted by time of last contact
replacements []*Node // recently seen nodes to be used if revalidation fails replacements []*node // recently seen nodes to be used if revalidation fails
ips netutil.DistinctNetSet ips netutil.DistinctNetSet
} }
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) { func newTable(t transport, self *enode.Node, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
// If no node database was given, use an in-memory one
db, err := newNodeDB(nodeDBPath, nodeDBVersion, ourID)
if err != nil {
return nil, err
}
tab := &Table{ tab := &Table{
net: t, net: t,
db: db, db: db,
self: NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)), self: wrapNode(self),
refreshReq: make(chan chan struct{}), refreshReq: make(chan chan struct{}),
initDone: make(chan struct{}), initDone: make(chan struct{}),
closeReq: make(chan struct{}), closeReq: make(chan struct{}),
@ -125,10 +122,7 @@ func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string
} }
tab.seedRand() tab.seedRand()
tab.loadSeedNodes() tab.loadSeedNodes()
// Start the background expiration goroutine after loading seeds so that the search for
// seed nodes also considers older nodes that would otherwise be removed by the
// expiration.
tab.db.ensureExpirer()
go tab.loop() go tab.loop()
return tab, nil return tab, nil
} }
@ -143,15 +137,13 @@ func (tab *Table) seedRand() {
} }
// Self returns the local node. // Self returns the local node.
// The returned node should not be modified by the caller. func (tab *Table) Self() *enode.Node {
func (tab *Table) Self() *Node { return unwrapNode(tab.self)
return tab.self
} }
// ReadRandomNodes fills the given slice with random nodes from the // ReadRandomNodes fills the given slice with random nodes from the table. The results
// table. It will not write the same node more than once. The nodes in // are guaranteed to be unique for a single invocation, no node will appear twice.
// the slice are copies and can be modified by the caller. func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
if !tab.isInitDone() { if !tab.isInitDone() {
return 0 return 0
} }
@ -159,7 +151,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
defer tab.mutex.Unlock() defer tab.mutex.Unlock()
// Find all non-empty buckets and get a fresh slice of their entries. // Find all non-empty buckets and get a fresh slice of their entries.
var buckets [][]*Node var buckets [][]*node
for _, b := range &tab.buckets { for _, b := range &tab.buckets {
if len(b.entries) > 0 { if len(b.entries) > 0 {
buckets = append(buckets, b.entries) buckets = append(buckets, b.entries)
@ -177,7 +169,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
var i, j int var i, j int
for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) { for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
b := buckets[j] b := buckets[j]
buf[i] = &(*b[0]) buf[i] = unwrapNode(b[0])
buckets[j] = b[1:] buckets[j] = b[1:]
if len(b) == 1 { if len(b) == 1 {
buckets = append(buckets[:j], buckets[j+1:]...) buckets = append(buckets[:j], buckets[j+1:]...)
@ -202,20 +194,13 @@ func (tab *Table) Close() {
// setFallbackNodes sets the initial points of contact. These nodes // setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there // are used to connect to the network if the table is empty and there
// are no known nodes in the database. // are no known nodes in the database.
func (tab *Table) setFallbackNodes(nodes []*Node) error { func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
for _, n := range nodes { for _, n := range nodes {
if err := n.validateComplete(); err != nil { if err := n.ValidateComplete(); err != nil {
return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err) return fmt.Errorf("bad bootstrap node %q: %v", n, err)
} }
} }
tab.nursery = make([]*Node, 0, len(nodes)) tab.nursery = wrapNodes(nodes)
for _, n := range nodes {
cpy := *n
// Recompute cpy.sha because the node might not have been
// created by NewNode or ParseNode.
cpy.sha = crypto.Keccak256Hash(n.ID[:])
tab.nursery = append(tab.nursery, &cpy)
}
return nil return nil
} }
@ -231,47 +216,48 @@ func (tab *Table) isInitDone() bool {
// Resolve searches for a specific node with the given ID. // Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found. // It returns nil if the node could not be found.
func (tab *Table) Resolve(targetID NodeID) *Node { func (tab *Table) Resolve(n *enode.Node) *enode.Node {
// If the node is present in the local table, no // If the node is present in the local table, no
// network interaction is required. // network interaction is required.
hash := crypto.Keccak256Hash(targetID[:]) hash := n.ID()
tab.mutex.Lock() tab.mutex.Lock()
cl := tab.closest(hash, 1) cl := tab.closest(hash, 1)
tab.mutex.Unlock() tab.mutex.Unlock()
if len(cl.entries) > 0 && cl.entries[0].ID == targetID { if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
return cl.entries[0] return unwrapNode(cl.entries[0])
} }
// Otherwise, do a network lookup. // Otherwise, do a network lookup.
result := tab.Lookup(targetID) result := tab.lookup(encodePubkey(n.Pubkey()), true)
for _, n := range result { for _, n := range result {
if n.ID == targetID { if n.ID() == hash {
return n return unwrapNode(n)
} }
} }
return nil return nil
} }
// Lookup performs a network search for nodes close // LookupRandom finds random nodes in the network.
// to the given target. It approaches the target by querying func (tab *Table) LookupRandom() []*enode.Node {
// nodes that are closer to it on each iteration. var target encPubkey
// The given target does not need to be an actual node crand.Read(target[:])
// identifier. return unwrapNodes(tab.lookup(target, true))
func (tab *Table) Lookup(targetID NodeID) []*Node {
return tab.lookup(targetID, true)
} }
func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node { // lookup performs a network search for nodes close to the given target. It approaches the
// target by querying nodes that are closer to it on each iteration. The given target does
// not need to be an actual node identifier.
func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
var ( var (
target = crypto.Keccak256Hash(targetID[:]) target = enode.ID(crypto.Keccak256Hash(targetKey[:]))
asked = make(map[NodeID]bool) asked = make(map[enode.ID]bool)
seen = make(map[NodeID]bool) seen = make(map[enode.ID]bool)
reply = make(chan []*Node, alpha) reply = make(chan []*node, alpha)
pendingQueries = 0 pendingQueries = 0
result *nodesByDistance result *nodesByDistance
) )
// don't query further if we hit ourself. // don't query further if we hit ourself.
// unlikely to happen often in practice. // unlikely to happen often in practice.
asked[tab.self.ID] = true asked[tab.self.ID()] = true
for { for {
tab.mutex.Lock() tab.mutex.Lock()
@ -293,10 +279,10 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
// ask the alpha closest nodes that we haven't asked yet // ask the alpha closest nodes that we haven't asked yet
for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ { for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
n := result.entries[i] n := result.entries[i]
if !asked[n.ID] { if !asked[n.ID()] {
asked[n.ID] = true asked[n.ID()] = true
pendingQueries++ pendingQueries++
go tab.findnode(n, targetID, reply) go tab.findnode(n, targetKey, reply)
} }
} }
if pendingQueries == 0 { if pendingQueries == 0 {
@ -305,8 +291,8 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
} }
// wait for the next reply // wait for the next reply
for _, n := range <-reply { for _, n := range <-reply {
if n != nil && !seen[n.ID] { if n != nil && !seen[n.ID()] {
seen[n.ID] = true seen[n.ID()] = true
result.push(n, bucketSize) result.push(n, bucketSize)
} }
} }
@ -315,19 +301,19 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
return result.entries return result.entries
} }
func (tab *Table) findnode(n *Node, targetID NodeID, reply chan<- []*Node) { func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
fails := tab.db.findFails(n.ID) fails := tab.db.FindFails(n.ID())
r, err := tab.net.findnode(n.ID, n.addr(), targetID) r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
if err != nil || len(r) == 0 { if err != nil || len(r) == 0 {
fails++ fails++
tab.db.updateFindFails(n.ID, fails) tab.db.UpdateFindFails(n.ID(), fails)
log.Trace("Findnode failed", "id", n.ID, "failcount", fails, "err", err) log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
if fails >= maxFindnodeFailures { if fails >= maxFindnodeFailures {
log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails) log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
tab.delete(n) tab.delete(n)
} }
} else if fails > 0 { } else if fails > 0 {
tab.db.updateFindFails(n.ID, fails-1) tab.db.UpdateFindFails(n.ID(), fails-1)
} }
// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll // Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
@ -405,7 +391,6 @@ loop:
for _, ch := range waiting { for _, ch := range waiting {
close(ch) close(ch)
} }
tab.db.close()
close(tab.closed) close(tab.closed)
} }
@ -421,7 +406,11 @@ func (tab *Table) doRefresh(done chan struct{}) {
tab.loadSeedNodes() tab.loadSeedNodes()
// Run self lookup to discover new neighbor nodes. // Run self lookup to discover new neighbor nodes.
tab.lookup(tab.self.ID, false) // We can only do this if we have a secp256k1 identity.
var key ecdsa.PublicKey
if err := tab.self.Load((*enode.Secp256k1)(&key)); err == nil {
tab.lookup(encodePubkey(&key), false)
}
// The Kademlia paper specifies that the bucket refresh should // The Kademlia paper specifies that the bucket refresh should
// perform a lookup in the least recently used bucket. We cannot // perform a lookup in the least recently used bucket. We cannot
@ -430,19 +419,19 @@ func (tab *Table) doRefresh(done chan struct{}) {
// sha3 preimage that falls into a chosen bucket. // sha3 preimage that falls into a chosen bucket.
// We perform a few lookups with a random target instead. // We perform a few lookups with a random target instead.
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
var target NodeID var target encPubkey
crand.Read(target[:]) crand.Read(target[:])
tab.lookup(target, false) tab.lookup(target, false)
} }
} }
func (tab *Table) loadSeedNodes() { func (tab *Table) loadSeedNodes() {
seeds := tab.db.querySeeds(seedCount, seedMaxAge) seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
seeds = append(seeds, tab.nursery...) seeds = append(seeds, tab.nursery...)
for i := range seeds { for i := range seeds {
seed := seeds[i] seed := seeds[i]
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.lastPongReceived(seed.ID)) }} age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age) log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.add(seed) tab.add(seed)
} }
} }
@ -459,28 +448,28 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
} }
// Ping the selected node and wait for a pong. // Ping the selected node and wait for a pong.
err := tab.net.ping(last.ID, last.addr()) err := tab.net.ping(last.ID(), last.addr())
tab.mutex.Lock() tab.mutex.Lock()
defer tab.mutex.Unlock() defer tab.mutex.Unlock()
b := tab.buckets[bi] b := tab.buckets[bi]
if err == nil { if err == nil {
// The node responded, move it to the front. // The node responded, move it to the front.
log.Trace("Revalidated node", "b", bi, "id", last.ID) log.Debug("Revalidated node", "b", bi, "id", last.ID())
b.bump(last) b.bump(last)
return return
} }
// No reply received, pick a replacement or delete the node if there aren't // No reply received, pick a replacement or delete the node if there aren't
// any replacements. // any replacements.
if r := tab.replace(b, last); r != nil { if r := tab.replace(b, last); r != nil {
log.Trace("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP) log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "r", r.ID(), "rip", r.IP())
} else { } else {
log.Trace("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP) log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP())
} }
} }
// nodeToRevalidate returns the last node in a random, non-empty bucket. // nodeToRevalidate returns the last node in a random, non-empty bucket.
func (tab *Table) nodeToRevalidate() (n *Node, bi int) { func (tab *Table) nodeToRevalidate() (n *node, bi int) {
tab.mutex.Lock() tab.mutex.Lock()
defer tab.mutex.Unlock() defer tab.mutex.Unlock()
@ -511,7 +500,7 @@ func (tab *Table) copyLiveNodes() {
for _, b := range &tab.buckets { for _, b := range &tab.buckets {
for _, n := range b.entries { for _, n := range b.entries {
if now.Sub(n.addedAt) >= seedMinTableTime { if now.Sub(n.addedAt) >= seedMinTableTime {
tab.db.updateNode(n) tab.db.UpdateNode(unwrapNode(n))
} }
} }
} }
@ -519,7 +508,7 @@ func (tab *Table) copyLiveNodes() {
// closest returns the n nodes in the table that are closest to the // closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex. // given id. The caller must hold tab.mutex.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance { func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
// This is a very wasteful way to find the closest nodes but // This is a very wasteful way to find the closest nodes but
// obviously correct. I believe that tree-based buckets would make // obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently. // this easier to implement efficiently.
@ -540,8 +529,8 @@ func (tab *Table) len() (n int) {
} }
// bucket returns the bucket for the given node ID hash. // bucket returns the bucket for the given node ID hash.
func (tab *Table) bucket(sha common.Hash) *bucket { func (tab *Table) bucket(id enode.ID) *bucket {
d := logdist(tab.self.sha, sha) d := enode.LogDist(tab.self.ID(), id)
if d <= bucketMinDistance { if d <= bucketMinDistance {
return tab.buckets[0] return tab.buckets[0]
} }
@ -553,11 +542,14 @@ func (tab *Table) bucket(sha common.Hash) *bucket {
// least recently active node in the bucket does not respond to a ping packet. // least recently active node in the bucket does not respond to a ping packet.
// //
// The caller must not hold tab.mutex. // The caller must not hold tab.mutex.
func (tab *Table) add(n *Node) { func (tab *Table) add(n *node) {
if n.ID() == tab.self.ID() {
return
}
tab.mutex.Lock() tab.mutex.Lock()
defer tab.mutex.Unlock() defer tab.mutex.Unlock()
b := tab.bucket(n.ID())
b := tab.bucket(n.sha)
if !tab.bumpOrAdd(b, n) { if !tab.bumpOrAdd(b, n) {
// Node is not in table. Add it to the replacement list. // Node is not in table. Add it to the replacement list.
tab.addReplacement(b, n) tab.addReplacement(b, n)
@ -570,7 +562,7 @@ func (tab *Table) add(n *Node) {
// table could be filled by just sending ping repeatedly. // table could be filled by just sending ping repeatedly.
// //
// The caller must not hold tab.mutex. // The caller must not hold tab.mutex.
func (tab *Table) addThroughPing(n *Node) { func (tab *Table) addThroughPing(n *node) {
if !tab.isInitDone() { if !tab.isInitDone() {
return return
} }
@ -579,15 +571,15 @@ func (tab *Table) addThroughPing(n *Node) {
// stuff adds nodes the table to the end of their corresponding bucket // stuff adds nodes the table to the end of their corresponding bucket
// if the bucket is not full. The caller must not hold tab.mutex. // if the bucket is not full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*Node) { func (tab *Table) stuff(nodes []*node) {
tab.mutex.Lock() tab.mutex.Lock()
defer tab.mutex.Unlock() defer tab.mutex.Unlock()
for _, n := range nodes { for _, n := range nodes {
if n.ID == tab.self.ID { if n.ID() == tab.self.ID() {
continue // don't add self continue // don't add self
} }
b := tab.bucket(n.sha) b := tab.bucket(n.ID())
if len(b.entries) < bucketSize { if len(b.entries) < bucketSize {
tab.bumpOrAdd(b, n) tab.bumpOrAdd(b, n)
} }
@ -595,11 +587,11 @@ func (tab *Table) stuff(nodes []*Node) {
} }
// delete removes an entry from the node table. It is used to evacuate dead nodes. // delete removes an entry from the node table. It is used to evacuate dead nodes.
func (tab *Table) delete(node *Node) { func (tab *Table) delete(node *node) {
tab.mutex.Lock() tab.mutex.Lock()
defer tab.mutex.Unlock() defer tab.mutex.Unlock()
tab.deleteInBucket(tab.bucket(node.sha), node) tab.deleteInBucket(tab.bucket(node.ID()), node)
} }
func (tab *Table) addIP(b *bucket, ip net.IP) bool { func (tab *Table) addIP(b *bucket, ip net.IP) bool {
@ -626,27 +618,27 @@ func (tab *Table) removeIP(b *bucket, ip net.IP) {
b.ips.Remove(ip) b.ips.Remove(ip)
} }
func (tab *Table) addReplacement(b *bucket, n *Node) { func (tab *Table) addReplacement(b *bucket, n *node) {
for _, e := range b.replacements { for _, e := range b.replacements {
if e.ID == n.ID { if e.ID() == n.ID() {
return // already in list return // already in list
} }
} }
if !tab.addIP(b, n.IP) { if !tab.addIP(b, n.IP()) {
return return
} }
var removed *Node var removed *node
b.replacements, removed = pushNode(b.replacements, n, maxReplacements) b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
if removed != nil { if removed != nil {
tab.removeIP(b, removed.IP) tab.removeIP(b, removed.IP())
} }
} }
// replace removes n from the replacement list and replaces 'last' with it if it is the // replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced // last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active. // with someone else or became active.
func (tab *Table) replace(b *bucket, last *Node) *Node { func (tab *Table) replace(b *bucket, last *node) *node {
if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID { if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
// Entry has moved, don't replace it. // Entry has moved, don't replace it.
return nil return nil
} }
@ -658,15 +650,15 @@ func (tab *Table) replace(b *bucket, last *Node) *Node {
r := b.replacements[tab.rand.Intn(len(b.replacements))] r := b.replacements[tab.rand.Intn(len(b.replacements))]
b.replacements = deleteNode(b.replacements, r) b.replacements = deleteNode(b.replacements, r)
b.entries[len(b.entries)-1] = r b.entries[len(b.entries)-1] = r
tab.removeIP(b, last.IP) tab.removeIP(b, last.IP())
return r return r
} }
// bump moves the given node to the front of the bucket entry list // bump moves the given node to the front of the bucket entry list
// if it is contained in that list. // if it is contained in that list.
func (b *bucket) bump(n *Node) bool { func (b *bucket) bump(n *node) bool {
for i := range b.entries { for i := range b.entries {
if b.entries[i].ID == n.ID { if b.entries[i].ID() == n.ID() {
// move it to the front // move it to the front
copy(b.entries[1:], b.entries[:i]) copy(b.entries[1:], b.entries[:i])
b.entries[0] = n b.entries[0] = n
@ -678,11 +670,11 @@ func (b *bucket) bump(n *Node) bool {
// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't // bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket. // full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool { func (tab *Table) bumpOrAdd(b *bucket, n *node) bool {
if b.bump(n) { if b.bump(n) {
return true return true
} }
if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) { if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP()) {
return false return false
} }
b.entries, _ = pushNode(b.entries, n, bucketSize) b.entries, _ = pushNode(b.entries, n, bucketSize)
@ -694,13 +686,13 @@ func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
return true return true
} }
func (tab *Table) deleteInBucket(b *bucket, n *Node) { func (tab *Table) deleteInBucket(b *bucket, n *node) {
b.entries = deleteNode(b.entries, n) b.entries = deleteNode(b.entries, n)
tab.removeIP(b, n.IP) tab.removeIP(b, n.IP())
} }
// pushNode adds n to the front of list, keeping at most max items. // pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) { func pushNode(list []*node, n *node, max int) ([]*node, *node) {
if len(list) < max { if len(list) < max {
list = append(list, nil) list = append(list, nil)
} }
@ -711,9 +703,9 @@ func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
} }
// deleteNode removes n from list. // deleteNode removes n from list.
func deleteNode(list []*Node, n *Node) []*Node { func deleteNode(list []*node, n *node) []*node {
for i := range list { for i := range list {
if list[i].ID == n.ID { if list[i].ID() == n.ID() {
return append(list[:i], list[i+1:]...) return append(list[:i], list[i+1:]...)
} }
} }
@ -723,14 +715,14 @@ func deleteNode(list []*Node, n *Node) []*Node {
// nodesByDistance is a list of nodes, ordered by // nodesByDistance is a list of nodes, ordered by
// distance to target. // distance to target.
type nodesByDistance struct { type nodesByDistance struct {
entries []*Node entries []*node
target common.Hash target enode.ID
} }
// push adds the given node to the list, keeping the total size below maxElems. // push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) { func (h *nodesByDistance) push(n *node, maxElems int) {
ix := sort.Search(len(h.entries), func(i int) bool { ix := sort.Search(len(h.entries), func(i int) bool {
return distcmp(h.target, h.entries[i].sha, n.sha) > 0 return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
}) })
if len(h.entries) < maxElems { if len(h.entries) < maxElems {
h.entries = append(h.entries, n) h.entries = append(h.entries, n)
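From a caller's perspective the table now deals in *enode.Node throughout: Self, Resolve, LookupRandom and ReadRandomNodes all return record-backed nodes. A minimal sketch of standing up a discovery table on localhost and sampling it (port, buffer size and error handling are arbitrary; right after startup the sample may be empty):

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	// An empty NodeDBPath makes the table use an in-memory node database.
	conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1")})
	if err != nil {
		log.Fatal(err)
	}
	tab, err := discover.ListenUDP(conn, discover.Config{PrivateKey: key})
	if err != nil {
		log.Fatal(err)
	}
	defer tab.Close()

	// ReadRandomNodes fills the buffer with at most len(buf) unique nodes.
	buf := make([]*enode.Node, 16)
	n := tab.ReadRandomNodes(buf)
	fmt.Println("sampled", n, "nodes from the table")
}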


@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -46,8 +47,9 @@ var (
// Timeouts // Timeouts
const ( const (
respTimeout = 500 * time.Millisecond respTimeout = 500 * time.Millisecond
expiration = 20 * time.Second expiration = 20 * time.Second
bondExpiration = 24 * time.Hour
ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP
ntpWarningCooldown = 10 * time.Minute // Minimum amount of time to pass before repeating NTP warning ntpWarningCooldown = 10 * time.Minute // Minimum amount of time to pass before repeating NTP warning
@ -87,7 +89,7 @@ type (
// findnode is a query for nodes close to the given target. // findnode is a query for nodes close to the given target.
findnode struct { findnode struct {
Target NodeID // doesn't need to be an actual public key Target encPubkey
Expiration uint64 Expiration uint64
// Ignore additional fields (for forward compatibility). // Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"` Rest []rlp.RawValue `rlp:"tail"`
@ -105,7 +107,7 @@ type (
IP net.IP // len 4 for IPv4 or 16 for IPv6 IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP uint16 // for discovery protocol UDP uint16 // for discovery protocol
TCP uint16 // for RLPx protocol TCP uint16 // for RLPx protocol
ID NodeID ID encPubkey
} }
rpcEndpoint struct { rpcEndpoint struct {
@ -123,7 +125,7 @@ func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint {
return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort} return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort}
} }
func (t *udp) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*Node, error) { func (t *udp) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*node, error) {
if rn.UDP <= 1024 { if rn.UDP <= 1024 {
return nil, errors.New("low port") return nil, errors.New("low port")
} }
@ -133,17 +135,26 @@ func (t *udp) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*Node, error) {
if t.netrestrict != nil && !t.netrestrict.Contains(rn.IP) { if t.netrestrict != nil && !t.netrestrict.Contains(rn.IP) {
return nil, errors.New("not contained in netrestrict whitelist") return nil, errors.New("not contained in netrestrict whitelist")
} }
n := NewNode(rn.ID, rn.IP, rn.UDP, rn.TCP) key, err := decodePubkey(rn.ID)
err := n.validateComplete() if err != nil {
return nil, err
}
n := wrapNode(enode.NewV4(key, rn.IP, int(rn.TCP), int(rn.UDP)))
err = n.ValidateComplete()
return n, err return n, err
} }
func nodeToRPC(n *Node) rpcNode { func nodeToRPC(n *node) rpcNode {
return rpcNode{ID: n.ID, IP: n.IP, UDP: n.UDP, TCP: n.TCP} var key ecdsa.PublicKey
var ekey encPubkey
if err := n.Load((*enode.Secp256k1)(&key)); err == nil {
ekey = encodePubkey(&key)
}
return rpcNode{ID: ekey, IP: n.IP(), UDP: uint16(n.UDP()), TCP: uint16(n.TCP())}
} }
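
Note (not part of the diff): nodeToRPC now recovers the secp256k1 key from the node record via Load. A minimal sketch of that round trip against the enode API referenced above (the generated key and the 127.0.0.1 endpoint are placeholders):

package main

import (
	"crypto/ecdsa"
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	priv, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	// Build a v4 node record from the public key and endpoint.
	n := enode.NewV4(&priv.PublicKey, net.ParseIP("127.0.0.1"), 30303, 30303)

	// Recover the key from the record, as nodeToRPC does.
	var key ecdsa.PublicKey
	if err := n.Load((*enode.Secp256k1)(&key)); err != nil {
		panic(err)
	}
	fmt.Println("node ID :", n.ID())
	fmt.Println("endpoint:", n.IP(), n.UDP(), n.TCP())
	fmt.Println("key matches:",
		key.X.Cmp(priv.PublicKey.X) == 0 && key.Y.Cmp(priv.PublicKey.Y) == 0)
}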
type packet interface { type packet interface {
handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error
name() string name() string
} }
@ -181,7 +192,7 @@ type udp struct {
// to all the callback functions for that node. // to all the callback functions for that node.
type pending struct { type pending struct {
// these fields must match in the reply. // these fields must match in the reply.
from NodeID from enode.ID
ptype byte ptype byte
// time when the request must complete // time when the request must complete
@ -199,7 +210,7 @@ type pending struct {
} }
type reply struct { type reply struct {
from NodeID from enode.ID
ptype byte ptype byte
data interface{} data interface{}
// loop indicates whether there was // loop indicates whether there was
@ -222,7 +233,7 @@ type Config struct {
AnnounceAddr *net.UDPAddr // local address announced in the DHT AnnounceAddr *net.UDPAddr // local address announced in the DHT
NodeDBPath string // if set, the node database is stored at this filesystem location NodeDBPath string // if set, the node database is stored at this filesystem location
NetRestrict *netutil.Netlist // network whitelist NetRestrict *netutil.Netlist // network whitelist
Bootnodes []*Node // list of bootstrap nodes Bootnodes []*enode.Node // list of bootstrap nodes
Unhandled chan<- ReadPacket // unhandled packets are sent on this channel Unhandled chan<- ReadPacket // unhandled packets are sent on this channel
} }
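
Note (not part of the diff): with Bootnodes now typed as []*enode.Node, callers hand over parsed node records instead of discover.Node values. A hedged sketch of building that slice from enode:// URLs with enode.ParseV4 (the URL below is a placeholder and will fail to parse; a real one carries the peer's 64-byte public key in hex):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// rawBootnodes would normally come from configuration.
	rawBootnodes := []string{
		"enode://<64-byte-hex-pubkey>@203.0.113.10:30303",
	}
	var bootnodes []*enode.Node
	for _, url := range rawBootnodes {
		n, err := enode.ParseV4(url)
		if err != nil {
			fmt.Println("skipping bootnode:", err)
			continue
		}
		bootnodes = append(bootnodes, n)
	}
	fmt.Println("parsed", len(bootnodes), "bootnodes")
	// The slice can then be passed as Config{Bootnodes: bootnodes}.
}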
@ -237,6 +248,16 @@ func ListenUDP(c conn, cfg Config) (*Table, error) {
} }
func newUDP(c conn, cfg Config) (*Table, *udp, error) { func newUDP(c conn, cfg Config) (*Table, *udp, error) {
realaddr := c.LocalAddr().(*net.UDPAddr)
if cfg.AnnounceAddr != nil {
realaddr = cfg.AnnounceAddr
}
self := enode.NewV4(&cfg.PrivateKey.PublicKey, realaddr.IP, realaddr.Port, realaddr.Port)
db, err := enode.OpenDB(cfg.NodeDBPath)
if err != nil {
return nil, nil, err
}
udp := &udp{ udp := &udp{
conn: c, conn: c,
priv: cfg.PrivateKey, priv: cfg.PrivateKey,
@ -245,13 +266,9 @@ func newUDP(c conn, cfg Config) (*Table, *udp, error) {
gotreply: make(chan reply), gotreply: make(chan reply),
addpending: make(chan *pending), addpending: make(chan *pending),
} }
realaddr := c.LocalAddr().(*net.UDPAddr)
if cfg.AnnounceAddr != nil {
realaddr = cfg.AnnounceAddr
}
// TODO: separate TCP port // TODO: separate TCP port
udp.ourEndpoint = makeEndpoint(realaddr, uint16(realaddr.Port)) udp.ourEndpoint = makeEndpoint(realaddr, uint16(realaddr.Port))
tab, err := newTable(udp, PubkeyID(&cfg.PrivateKey.PublicKey), realaddr, cfg.NodeDBPath, cfg.Bootnodes) tab, err := newTable(udp, self, db, cfg.Bootnodes)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
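
Note (not part of the diff): newUDP now derives the local v4 record from the announced endpoint and opens the enode database itself. A small sketch of those two calls in isolation (the generated key and example IP are placeholders; an empty path opens an in-memory DB):

package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	priv, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	realaddr := &net.UDPAddr{IP: net.ParseIP("203.0.113.5"), Port: 30303}

	// Local node record, as built at the top of newUDP.
	self := enode.NewV4(&priv.PublicKey, realaddr.IP, realaddr.Port, realaddr.Port)

	// Empty path opens an in-memory node database; a filesystem path persists it.
	db, err := enode.OpenDB("")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	fmt.Println("self:", self.ID())
	fmt.Println("last pong from self (zero time expected):", db.LastPongReceived(self.ID()))
}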
@ -265,17 +282,18 @@ func newUDP(c conn, cfg Config) (*Table, *udp, error) {
func (t *udp) close() { func (t *udp) close() {
close(t.closing) close(t.closing)
t.conn.Close() t.conn.Close()
t.db.Close()
// TODO: wait for the loops to end. // TODO: wait for the loops to end.
} }
// ping sends a ping message to the given node and waits for a reply. // ping sends a ping message to the given node and waits for a reply.
func (t *udp) ping(toid NodeID, toaddr *net.UDPAddr) error { func (t *udp) ping(toid enode.ID, toaddr *net.UDPAddr) error {
return <-t.sendPing(toid, toaddr, nil) return <-t.sendPing(toid, toaddr, nil)
} }
// sendPing sends a ping message to the given node and invokes the callback // sendPing sends a ping message to the given node and invokes the callback
// when the reply arrives. // when the reply arrives.
func (t *udp) sendPing(toid NodeID, toaddr *net.UDPAddr, callback func()) <-chan error { func (t *udp) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) <-chan error {
req := &ping{ req := &ping{
Version: 4, Version: 4,
From: t.ourEndpoint, From: t.ourEndpoint,
@ -299,21 +317,21 @@ func (t *udp) sendPing(toid NodeID, toaddr *net.UDPAddr, callback func()) <-chan
return errc return errc
} }
func (t *udp) waitping(from NodeID) error { func (t *udp) waitping(from enode.ID) error {
return <-t.pending(from, pingPacket, func(interface{}) bool { return true }) return <-t.pending(from, pingPacket, func(interface{}) bool { return true })
} }
// findnode sends a findnode request to the given node and waits until // findnode sends a findnode request to the given node and waits until
// the node has sent up to k neighbors. // the node has sent up to k neighbors.
func (t *udp) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) { func (t *udp) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
// If we haven't seen a ping from the destination node for a while, it won't remember // If we haven't seen a ping from the destination node for a while, it won't remember
// our endpoint proof and reject findnode. Solicit a ping first. // our endpoint proof and reject findnode. Solicit a ping first.
if time.Since(t.db.lastPingReceived(toid)) > nodeDBNodeExpiration { if time.Since(t.db.LastPingReceived(toid)) > bondExpiration {
t.ping(toid, toaddr) t.ping(toid, toaddr)
t.waitping(toid) t.waitping(toid)
} }
nodes := make([]*Node, 0, bucketSize) nodes := make([]*node, 0, bucketSize)
nreceived := 0 nreceived := 0
errc := t.pending(toid, neighborsPacket, func(r interface{}) bool { errc := t.pending(toid, neighborsPacket, func(r interface{}) bool {
reply := r.(*neighbors) reply := r.(*neighbors)
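
Note (not part of the diff): findnode now re-pings a peer when the last ping from it is older than bondExpiration, so the remote still holds our endpoint proof when the findnode arrives. A trivial sketch of that freshness check:

package main

import (
	"fmt"
	"time"
)

// bondExpiration mirrors the constant introduced in this change (24h).
const bondExpiration = 24 * time.Hour

// needsRebond reports whether we should ping the peer again before sending
// findnode, based on when we last heard a ping from it.
func needsRebond(lastPing time.Time) bool {
	return time.Since(lastPing) > bondExpiration
}

func main() {
	fresh := time.Now().Add(-1 * time.Hour)
	stale := time.Now().Add(-48 * time.Hour)
	fmt.Println(needsRebond(fresh)) // false: endpoint proof still valid
	fmt.Println(needsRebond(stale)) // true: ping first, then findnode
}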
@ -337,7 +355,7 @@ func (t *udp) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node
// pending adds a reply callback to the pending reply queue. // pending adds a reply callback to the pending reply queue.
// see the documentation of type pending for a detailed explanation. // see the documentation of type pending for a detailed explanation.
func (t *udp) pending(id NodeID, ptype byte, callback func(interface{}) bool) <-chan error { func (t *udp) pending(id enode.ID, ptype byte, callback func(interface{}) bool) <-chan error {
ch := make(chan error, 1) ch := make(chan error, 1)
p := &pending{from: id, ptype: ptype, callback: callback, errc: ch} p := &pending{from: id, ptype: ptype, callback: callback, errc: ch}
select { select {
@ -349,7 +367,7 @@ func (t *udp) pending(id NodeID, ptype byte, callback func(interface{}) bool) <-
return ch return ch
} }
func (t *udp) handleReply(from NodeID, ptype byte, req packet) bool { func (t *udp) handleReply(from enode.ID, ptype byte, req packet) bool {
matched := make(chan bool, 1) matched := make(chan bool, 1)
select { select {
case t.gotreply <- reply{from, ptype, req, matched}: case t.gotreply <- reply{from, ptype, req, matched}:
@ -563,19 +581,20 @@ func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
return err return err
} }
func decodePacket(buf []byte) (packet, NodeID, []byte, error) { func decodePacket(buf []byte) (packet, encPubkey, []byte, error) {
if len(buf) < headSize+1 { if len(buf) < headSize+1 {
return nil, NodeID{}, nil, errPacketTooSmall return nil, encPubkey{}, nil, errPacketTooSmall
} }
hash, sig, sigdata := buf[:macSize], buf[macSize:headSize], buf[headSize:] hash, sig, sigdata := buf[:macSize], buf[macSize:headSize], buf[headSize:]
shouldhash := crypto.Keccak256(buf[macSize:]) shouldhash := crypto.Keccak256(buf[macSize:])
if !bytes.Equal(hash, shouldhash) { if !bytes.Equal(hash, shouldhash) {
return nil, NodeID{}, nil, errBadHash return nil, encPubkey{}, nil, errBadHash
} }
fromID, err := recoverNodeID(crypto.Keccak256(buf[headSize:]), sig) fromKey, err := recoverNodeKey(crypto.Keccak256(buf[headSize:]), sig)
if err != nil { if err != nil {
return nil, NodeID{}, hash, err return nil, fromKey, hash, err
} }
var req packet var req packet
switch ptype := sigdata[0]; ptype { switch ptype := sigdata[0]; ptype {
case pingPacket: case pingPacket:
@ -587,56 +606,59 @@ func decodePacket(buf []byte) (packet, NodeID, []byte, error) {
case neighborsPacket: case neighborsPacket:
req = new(neighbors) req = new(neighbors)
default: default:
return nil, fromID, hash, fmt.Errorf("unknown type: %d", ptype) return nil, fromKey, hash, fmt.Errorf("unknown type: %d", ptype)
} }
s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0) s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0)
err = s.Decode(req) err = s.Decode(req)
return req, fromID, hash, err return req, fromKey, hash, err
} }
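
Note (not part of the diff): a discovery v4 packet is hash(32) || signature(65) || type+payload, and decodePacket first verifies the Keccak-256 hash over everything after the prefix before recovering the sender key. A sketch of just the MAC check (the constants mirror macSize/sigSize/headSize from this file; the dummy packet body is a placeholder, and signature recovery is omitted):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

const (
	macSize  = 32 // Keccak-256 hash prefix
	sigSize  = 65 // recoverable secp256k1 signature
	headSize = macSize + sigSize
)

// checkMAC verifies the hash prefix of a discovery v4 packet, mirroring the
// first step of decodePacket.
func checkMAC(buf []byte) error {
	if len(buf) < headSize+1 {
		return fmt.Errorf("packet too small: %d bytes", len(buf))
	}
	hash := buf[:macSize]
	if !bytes.Equal(hash, crypto.Keccak256(buf[macSize:])) {
		return fmt.Errorf("bad hash")
	}
	return nil
}

func main() {
	// Build a dummy "packet": MAC over sig || body.
	body := append(make([]byte, sigSize), 0x01, 0xc0) // ptype byte + empty RLP list
	packet := append(crypto.Keccak256(body), body...)
	fmt.Println("valid packet    :", checkMAC(packet))

	packet[0] ^= 0xff // corrupt the MAC
	fmt.Println("corrupted packet:", checkMAC(packet))
}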
func (req *ping) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error { func (req *ping) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) { if expired(req.Expiration) {
return errExpired return errExpired
} }
key, err := decodePubkey(fromKey)
if err != nil {
return fmt.Errorf("invalid public key: %v", err)
}
t.send(from, pongPacket, &pong{ t.send(from, pongPacket, &pong{
To: makeEndpoint(from, req.From.TCP), To: makeEndpoint(from, req.From.TCP),
ReplyTok: mac, ReplyTok: mac,
Expiration: uint64(time.Now().Add(expiration).Unix()), Expiration: uint64(time.Now().Add(expiration).Unix()),
}) })
t.handleReply(fromID, pingPacket, req) n := wrapNode(enode.NewV4(key, from.IP, int(req.From.TCP), from.Port))
t.handleReply(n.ID(), pingPacket, req)
// Add the node to the table. Before doing so, ensure that we have a recent enough pong if time.Since(t.db.LastPongReceived(n.ID())) > bondExpiration {
// recorded in the database so their findnode requests will be accepted later. t.sendPing(n.ID(), from, func() { t.addThroughPing(n) })
n := NewNode(fromID, from.IP, uint16(from.Port), req.From.TCP)
if time.Since(t.db.lastPongReceived(fromID)) > nodeDBNodeExpiration {
t.sendPing(fromID, from, func() { t.addThroughPing(n) })
} else { } else {
t.addThroughPing(n) t.addThroughPing(n)
} }
t.db.updateLastPingReceived(fromID, time.Now()) t.db.UpdateLastPingReceived(n.ID(), time.Now())
return nil return nil
} }
func (req *ping) name() string { return "PING/v4" } func (req *ping) name() string { return "PING/v4" }
func (req *pong) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error { func (req *pong) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) { if expired(req.Expiration) {
return errExpired return errExpired
} }
fromID := fromKey.id()
if !t.handleReply(fromID, pongPacket, req) { if !t.handleReply(fromID, pongPacket, req) {
return errUnsolicitedReply return errUnsolicitedReply
} }
t.db.updateLastPongReceived(fromID, time.Now()) t.db.UpdateLastPongReceived(fromID, time.Now())
return nil return nil
} }
func (req *pong) name() string { return "PONG/v4" } func (req *pong) name() string { return "PONG/v4" }
func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error { func (req *findnode) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) { if expired(req.Expiration) {
return errExpired return errExpired
} }
if !t.db.hasBond(fromID) { fromID := fromKey.id()
if time.Since(t.db.LastPongReceived(fromID)) > bondExpiration {
// No endpoint proof pong exists, we don't process the packet. This prevents an // No endpoint proof pong exists, we don't process the packet. This prevents an
// attack vector where the discovery protocol could be used to amplify traffic in a // attack vector where the discovery protocol could be used to amplify traffic in a
// DDOS attack. A malicious actor would send a findnode request with the IP address // DDOS attack. A malicious actor would send a findnode request with the IP address
@ -645,7 +667,7 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
// findnode) to the victim. // findnode) to the victim.
return errUnknownNode return errUnknownNode
} }
target := crypto.Keccak256Hash(req.Target[:]) target := enode.ID(crypto.Keccak256Hash(req.Target[:]))
t.mutex.Lock() t.mutex.Lock()
closest := t.closest(target, bucketSize).entries closest := t.closest(target, bucketSize).entries
t.mutex.Unlock() t.mutex.Unlock()
@ -655,7 +677,7 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
// Send neighbors in chunks with at most maxNeighbors per packet // Send neighbors in chunks with at most maxNeighbors per packet
// to stay below the 1280 byte limit. // to stay below the 1280 byte limit.
for _, n := range closest { for _, n := range closest {
if netutil.CheckRelayIP(from.IP, n.IP) == nil { if netutil.CheckRelayIP(from.IP, n.IP()) == nil {
p.Nodes = append(p.Nodes, nodeToRPC(n)) p.Nodes = append(p.Nodes, nodeToRPC(n))
} }
if len(p.Nodes) == maxNeighbors { if len(p.Nodes) == maxNeighbors {
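
Note (not part of the diff): the findnode handler above batches its reply into packets of at most maxNeighbors entries to stay under the 1280-byte limit. A standalone sketch of that chunking loop (the real maxNeighbors is computed at init time from the packet size limit; 12 here is illustrative):

package main

import "fmt"

// maxNeighbors: the real code derives this from the 1280-byte packet limit;
// 12 is used here purely for illustration.
const maxNeighbors = 12

// sendChunked splits nodes into packets of at most maxNeighbors entries,
// mirroring the loop in (*findnode).handle.
func sendChunked(nodes []string, send func(chunk []string)) {
	var chunk []string
	for _, n := range nodes {
		chunk = append(chunk, n)
		if len(chunk) == maxNeighbors {
			send(chunk)
			chunk = nil
		}
	}
	if len(chunk) > 0 {
		send(chunk) // final partial packet
	}
}

func main() {
	nodes := make([]string, 30)
	for i := range nodes {
		nodes[i] = fmt.Sprintf("node-%02d", i)
	}
	packets := 0
	sendChunked(nodes, func(chunk []string) {
		packets++
		fmt.Printf("packet %d: %d nodes\n", packets, len(chunk))
	})
}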
@ -672,11 +694,11 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
func (req *findnode) name() string { return "FINDNODE/v4" } func (req *findnode) name() string { return "FINDNODE/v4" }
func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error { func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) { if expired(req.Expiration) {
return errExpired return errExpired
} }
if !t.handleReply(fromID, neighborsPacket, req) { if !t.handleReply(fromKey.id(), neighborsPacket, req) {
return errUnsolicitedReply return errUnsolicitedReply
} }
return nil return nil

Some files were not shown because too many files have changed in this diff.