Rebase on geth 1.7.2 (#402)
This commit is contained in:
parent
689e19e22f
commit
b9372459cc
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -30,3 +30,6 @@ build/_vendor/pkg
|
|||
# travis
|
||||
profile.tmp
|
||||
profile.cov
|
||||
|
||||
# IdeaIDE
|
||||
.idea
|
||||
|
|
|
@ -6,7 +6,7 @@ matrix:
|
|||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.7.6
|
||||
go: 1.7.x
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
|
@ -18,7 +18,7 @@ matrix:
|
|||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.8.3
|
||||
go: 1.8.x
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
|
@ -31,7 +31,7 @@ matrix:
|
|||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.9.0
|
||||
go: 1.9.x
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
|
@ -41,7 +41,7 @@ matrix:
|
|||
- go run build/ci.go test -coverage -misspell
|
||||
|
||||
- os: osx
|
||||
go: 1.9.0
|
||||
go: 1.9.x
|
||||
sudo: required
|
||||
script:
|
||||
- brew update
|
||||
|
@ -54,7 +54,7 @@ matrix:
|
|||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.9.0
|
||||
go: 1.9.x
|
||||
env:
|
||||
- ubuntu-ppa
|
||||
- azure-linux
|
||||
|
@ -93,7 +93,7 @@ matrix:
|
|||
sudo: required
|
||||
services:
|
||||
- docker
|
||||
go: 1.9.0
|
||||
go: 1.9.x
|
||||
env:
|
||||
- azure-linux-mips
|
||||
script:
|
||||
|
@ -150,7 +150,7 @@ matrix:
|
|||
|
||||
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
|
||||
- os: osx
|
||||
go: 1.9.0
|
||||
go: 1.9.x
|
||||
env:
|
||||
- azure-osx
|
||||
- azure-ios
|
||||
|
@ -176,7 +176,7 @@ matrix:
|
|||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.9.0
|
||||
go: 1.9.x
|
||||
env:
|
||||
- azure-purge
|
||||
script:
|
||||
|
|
|
@ -1 +1 @@
|
|||
1.7.0
|
||||
1.7.2
|
||||
|
|
41
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go
generated
vendored
41
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go
generated
vendored
|
@ -22,6 +22,7 @@ import (
|
|||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
|
@ -167,7 +168,7 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rval, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
|
||||
rval, _, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
|
||||
return rval, err
|
||||
}
|
||||
|
||||
|
@ -177,7 +178,7 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereu
|
|||
defer b.mu.Unlock()
|
||||
defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())
|
||||
|
||||
rval, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
|
||||
rval, _, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
|
||||
return rval, err
|
||||
}
|
||||
|
||||
|
@ -203,8 +204,11 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
|
|||
defer b.mu.Unlock()
|
||||
|
||||
// Binary search the gas requirement, as it may be higher than the amount used
|
||||
var lo, hi uint64
|
||||
if call.Gas != nil {
|
||||
var (
|
||||
lo uint64 = params.TxGas - 1
|
||||
hi uint64
|
||||
)
|
||||
if call.Gas != nil && call.Gas.Uint64() >= params.TxGas {
|
||||
hi = call.Gas.Uint64()
|
||||
} else {
|
||||
hi = b.pendingBlock.GasLimit().Uint64()
|
||||
|
@ -215,11 +219,11 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
|
|||
call.Gas = new(big.Int).SetUint64(mid)
|
||||
|
||||
snapshot := b.pendingState.Snapshot()
|
||||
_, gas, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
|
||||
_, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
|
||||
b.pendingState.RevertToSnapshot(snapshot)
|
||||
|
||||
// If the transaction became invalid or used all the gas (failed), raise the gas limit
|
||||
if err != nil || gas.Cmp(call.Gas) == 0 {
|
||||
// If the transaction became invalid or execution failed, raise the gas limit
|
||||
if err != nil || failed {
|
||||
lo = mid
|
||||
continue
|
||||
}
|
||||
|
@ -231,7 +235,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
|
|||
|
||||
// callContract implemens common code between normal and pending contract calls.
|
||||
// state is modified during execution, make sure to copy it if necessary.
|
||||
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, *big.Int, error) {
|
||||
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, *big.Int, bool, error) {
|
||||
// Ensure message is initialized properly.
|
||||
if call.GasPrice == nil {
|
||||
call.GasPrice = big.NewInt(1)
|
||||
|
@ -253,9 +257,8 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
|
|||
// about the transaction and calling mechanisms.
|
||||
vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
|
||||
gaspool := new(core.GasPool).AddGas(math.MaxBig256)
|
||||
// TODO utilize returned failed flag to help gas estimation.
|
||||
ret, gasUsed, _, _, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
|
||||
return ret, gasUsed, err
|
||||
ret, gasUsed, _, failed, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
|
||||
return ret, gasUsed, failed, err
|
||||
}
|
||||
|
||||
// SendTransaction updates the pending block to include the given transaction.
|
||||
|
@ -284,6 +287,22 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
|
|||
return nil
|
||||
}
|
||||
|
||||
// JumpTimeInSeconds adds skip seconds to the clock
|
||||
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(number int, block *core.BlockGen) {
|
||||
for _, tx := range b.pendingBlock.Transactions() {
|
||||
block.AddTx(tx)
|
||||
}
|
||||
block.OffsetTime(int64(adjustment.Seconds()))
|
||||
})
|
||||
b.pendingBlock = blocks[0]
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// callmsg implements core.Message to allow passing it as a transaction simulator.
|
||||
type callmsg struct {
|
||||
ethereum.CallMsg
|
||||
|
|
|
@ -31,6 +31,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"gopkg.in/fatih/set.v0"
|
||||
)
|
||||
|
||||
// Minimum amount of time between cache reloads. This limit applies if the platform does
|
||||
|
@ -71,6 +72,14 @@ type accountCache struct {
|
|||
byAddr map[common.Address][]accounts.Account
|
||||
throttle *time.Timer
|
||||
notify chan struct{}
|
||||
fileC fileCache
|
||||
}
|
||||
|
||||
// fileCache is a cache of files seen during scan of keystore
|
||||
type fileCache struct {
|
||||
all *set.SetNonTS // list of all files
|
||||
mtime time.Time // latest mtime seen
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func newAccountCache(keydir string) (*accountCache, chan struct{}) {
|
||||
|
@ -78,6 +87,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
|
|||
keydir: keydir,
|
||||
byAddr: make(map[common.Address][]accounts.Account),
|
||||
notify: make(chan struct{}, 1),
|
||||
fileC: fileCache{all: set.NewNonTS()},
|
||||
}
|
||||
ac.watcher = newWatcher(ac)
|
||||
return ac, ac.notify
|
||||
|
@ -127,6 +137,23 @@ func (ac *accountCache) delete(removed accounts.Account) {
|
|||
}
|
||||
}
|
||||
|
||||
// deleteByFile removes an account referenced by the given path.
|
||||
func (ac *accountCache) deleteByFile(path string) {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Path >= path })
|
||||
|
||||
if i < len(ac.all) && ac.all[i].URL.Path == path {
|
||||
removed := ac.all[i]
|
||||
ac.all = append(ac.all[:i], ac.all[i+1:]...)
|
||||
if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
|
||||
delete(ac.byAddr, removed.Address)
|
||||
} else {
|
||||
ac.byAddr[removed.Address] = ba
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
|
||||
for i := range slice {
|
||||
if slice[i] == elem {
|
||||
|
@ -167,15 +194,16 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) {
|
|||
default:
|
||||
err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))}
|
||||
copy(err.Matches, matches)
|
||||
sort.Sort(accountsByURL(err.Matches))
|
||||
return accounts.Account{}, err
|
||||
}
|
||||
}
|
||||
|
||||
func (ac *accountCache) maybeReload() {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
|
||||
if ac.watcher.running {
|
||||
ac.mu.Unlock()
|
||||
return // A watcher is running and will keep the cache up-to-date.
|
||||
}
|
||||
if ac.throttle == nil {
|
||||
|
@ -184,12 +212,15 @@ func (ac *accountCache) maybeReload() {
|
|||
select {
|
||||
case <-ac.throttle.C:
|
||||
default:
|
||||
ac.mu.Unlock()
|
||||
return // The cache was reloaded recently.
|
||||
}
|
||||
}
|
||||
// No watcher running, start it.
|
||||
ac.watcher.start()
|
||||
ac.reload()
|
||||
ac.throttle.Reset(minReloadInterval)
|
||||
ac.mu.Unlock()
|
||||
ac.scanAccounts()
|
||||
}
|
||||
|
||||
func (ac *accountCache) close() {
|
||||
|
@ -205,54 +236,76 @@ func (ac *accountCache) close() {
|
|||
ac.mu.Unlock()
|
||||
}
|
||||
|
||||
// reload caches addresses of existing accounts.
|
||||
// Callers must hold ac.mu.
|
||||
func (ac *accountCache) reload() {
|
||||
accounts, err := ac.scan()
|
||||
// scanFiles performs a new scan on the given directory, compares against the already
|
||||
// cached filenames, and returns file sets: new, missing , modified
|
||||
func (fc *fileCache) scanFiles(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
|
||||
t0 := time.Now()
|
||||
files, err := ioutil.ReadDir(keyDir)
|
||||
t1 := time.Now()
|
||||
if err != nil {
|
||||
log.Debug("Failed to reload keystore contents", "err", err)
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ac.all = accounts
|
||||
sort.Sort(ac.all)
|
||||
for k := range ac.byAddr {
|
||||
delete(ac.byAddr, k)
|
||||
}
|
||||
for _, a := range accounts {
|
||||
ac.byAddr[a.Address] = append(ac.byAddr[a.Address], a)
|
||||
}
|
||||
select {
|
||||
case ac.notify <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
log.Debug("Reloaded keystore contents", "accounts", len(ac.all))
|
||||
}
|
||||
fc.mu.RLock()
|
||||
prevMtime := fc.mtime
|
||||
fc.mu.RUnlock()
|
||||
|
||||
func (ac *accountCache) scan() ([]accounts.Account, error) {
|
||||
files, err := ioutil.ReadDir(ac.keydir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
buf = new(bufio.Reader)
|
||||
addrs []accounts.Account
|
||||
keyJSON struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
)
|
||||
filesNow := set.NewNonTS()
|
||||
moddedFiles := set.NewNonTS()
|
||||
var newMtime time.Time
|
||||
for _, fi := range files {
|
||||
path := filepath.Join(ac.keydir, fi.Name())
|
||||
modTime := fi.ModTime()
|
||||
path := filepath.Join(keyDir, fi.Name())
|
||||
if skipKeyFile(fi) {
|
||||
log.Trace("Ignoring file on account scan", "path", path)
|
||||
continue
|
||||
}
|
||||
logger := log.New("path", path)
|
||||
filesNow.Add(path)
|
||||
if modTime.After(prevMtime) {
|
||||
moddedFiles.Add(path)
|
||||
}
|
||||
if modTime.After(newMtime) {
|
||||
newMtime = modTime
|
||||
}
|
||||
}
|
||||
t2 := time.Now()
|
||||
|
||||
fc.mu.Lock()
|
||||
// Missing = previous - current
|
||||
missing := set.Difference(fc.all, filesNow)
|
||||
// New = current - previous
|
||||
newFiles := set.Difference(filesNow, fc.all)
|
||||
// Modified = modified - new
|
||||
modified := set.Difference(moddedFiles, newFiles)
|
||||
fc.all = filesNow
|
||||
fc.mtime = newMtime
|
||||
fc.mu.Unlock()
|
||||
t3 := time.Now()
|
||||
log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
|
||||
return newFiles, missing, modified, nil
|
||||
}
|
||||
|
||||
// scanAccounts checks if any changes have occurred on the filesystem, and
|
||||
// updates the account cache accordingly
|
||||
func (ac *accountCache) scanAccounts() error {
|
||||
newFiles, missingFiles, modified, err := ac.fileC.scanFiles(ac.keydir)
|
||||
t1 := time.Now()
|
||||
if err != nil {
|
||||
log.Debug("Failed to reload keystore contents", "err", err)
|
||||
return err
|
||||
}
|
||||
var (
|
||||
buf = new(bufio.Reader)
|
||||
keyJSON struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
)
|
||||
readAccount := func(path string) *accounts.Account {
|
||||
fd, err := os.Open(path)
|
||||
if err != nil {
|
||||
logger.Trace("Failed to open keystore file", "err", err)
|
||||
continue
|
||||
log.Trace("Failed to open keystore file", "path", path, "err", err)
|
||||
return nil
|
||||
}
|
||||
defer fd.Close()
|
||||
buf.Reset(fd)
|
||||
// Parse the address.
|
||||
keyJSON.Address = ""
|
||||
|
@ -260,15 +313,45 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
|
|||
addr := common.HexToAddress(keyJSON.Address)
|
||||
switch {
|
||||
case err != nil:
|
||||
logger.Debug("Failed to decode keystore key", "err", err)
|
||||
log.Debug("Failed to decode keystore key", "path", path, "err", err)
|
||||
case (addr == common.Address{}):
|
||||
logger.Debug("Failed to decode keystore key", "err", "missing or zero address")
|
||||
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
|
||||
default:
|
||||
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
|
||||
return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
|
||||
}
|
||||
fd.Close()
|
||||
return nil
|
||||
}
|
||||
return addrs, err
|
||||
|
||||
for _, p := range newFiles.List() {
|
||||
path, _ := p.(string)
|
||||
a := readAccount(path)
|
||||
if a != nil {
|
||||
ac.add(*a)
|
||||
}
|
||||
}
|
||||
for _, p := range missingFiles.List() {
|
||||
path, _ := p.(string)
|
||||
ac.deleteByFile(path)
|
||||
}
|
||||
|
||||
for _, p := range modified.List() {
|
||||
path, _ := p.(string)
|
||||
a := readAccount(path)
|
||||
ac.deleteByFile(path)
|
||||
if a != nil {
|
||||
ac.add(*a)
|
||||
}
|
||||
}
|
||||
|
||||
t2 := time.Now()
|
||||
|
||||
select {
|
||||
case ac.notify <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
log.Trace("Handled keystore changes", "time", t2.Sub(t1))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func skipKeyFile(fi os.FileInfo) bool {
|
||||
|
|
|
@ -70,7 +70,6 @@ func (w *watcher) loop() {
|
|||
return
|
||||
}
|
||||
defer notify.Stop(w.ev)
|
||||
|
||||
logger.Trace("Started watching keystore folder")
|
||||
defer logger.Trace("Stopped watching keystore folder")
|
||||
|
||||
|
@ -82,9 +81,9 @@ func (w *watcher) loop() {
|
|||
// When an event occurs, the reload call is delayed a bit so that
|
||||
// multiple events arriving quickly only cause a single reload.
|
||||
var (
|
||||
debounce = time.NewTimer(0)
|
||||
debounceDuration = 500 * time.Millisecond
|
||||
inCycle, hadEvent bool
|
||||
debounce = time.NewTimer(0)
|
||||
debounceDuration = 500 * time.Millisecond
|
||||
rescanTriggered = false
|
||||
)
|
||||
defer debounce.Stop()
|
||||
for {
|
||||
|
@ -92,22 +91,14 @@ func (w *watcher) loop() {
|
|||
case <-w.quit:
|
||||
return
|
||||
case <-w.ev:
|
||||
if !inCycle {
|
||||
// Trigger the scan (with delay), if not already triggered
|
||||
if !rescanTriggered {
|
||||
debounce.Reset(debounceDuration)
|
||||
inCycle = true
|
||||
} else {
|
||||
hadEvent = true
|
||||
rescanTriggered = true
|
||||
}
|
||||
case <-debounce.C:
|
||||
w.ac.mu.Lock()
|
||||
w.ac.reload()
|
||||
w.ac.mu.Unlock()
|
||||
if hadEvent {
|
||||
debounce.Reset(debounceDuration)
|
||||
inCycle, hadEvent = true, false
|
||||
} else {
|
||||
inCycle, hadEvent = false, false
|
||||
}
|
||||
w.ac.scanAccounts()
|
||||
rescanTriggered = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -47,6 +47,8 @@ type Hub struct {
|
|||
scheme string // Protocol scheme prefixing account and wallet URLs.
|
||||
vendorID uint16 // USB vendor identifier used for device discovery
|
||||
productIDs []uint16 // USB product identifiers used for device discovery
|
||||
usageID uint16 // USB usage page identifier used for macOS device discovery
|
||||
endpointID int // USB endpoint identifier used for non-macOS device discovery
|
||||
makeDriver func(log.Logger) driver // Factory method to construct a vendor specific driver
|
||||
|
||||
refreshed time.Time // Time instance when the list of wallets was last refreshed
|
||||
|
@ -66,16 +68,16 @@ type Hub struct {
|
|||
|
||||
// NewLedgerHub creates a new hardware wallet manager for Ledger devices.
|
||||
func NewLedgerHub() (*Hub, error) {
|
||||
return newHub(LedgerScheme, 0x2c97, []uint16{0x0000 /* Ledger Blue */, 0x0001 /* Ledger Nano S */}, newLedgerDriver)
|
||||
return newHub(LedgerScheme, 0x2c97, []uint16{0x0000 /* Ledger Blue */, 0x0001 /* Ledger Nano S */}, 0xffa0, 0, newLedgerDriver)
|
||||
}
|
||||
|
||||
// NewTrezorHub creates a new hardware wallet manager for Trezor devices.
|
||||
func NewTrezorHub() (*Hub, error) {
|
||||
return newHub(TrezorScheme, 0x534c, []uint16{0x0001 /* Trezor 1 */}, newTrezorDriver)
|
||||
return newHub(TrezorScheme, 0x534c, []uint16{0x0001 /* Trezor 1 */}, 0xff00, 0, newTrezorDriver)
|
||||
}
|
||||
|
||||
// newHub creates a new hardware wallet manager for generic USB devices.
|
||||
func newHub(scheme string, vendorID uint16, productIDs []uint16, makeDriver func(log.Logger) driver) (*Hub, error) {
|
||||
func newHub(scheme string, vendorID uint16, productIDs []uint16, usageID uint16, endpointID int, makeDriver func(log.Logger) driver) (*Hub, error) {
|
||||
if !hid.Supported() {
|
||||
return nil, errors.New("unsupported platform")
|
||||
}
|
||||
|
@ -83,6 +85,8 @@ func newHub(scheme string, vendorID uint16, productIDs []uint16, makeDriver func
|
|||
scheme: scheme,
|
||||
vendorID: vendorID,
|
||||
productIDs: productIDs,
|
||||
usageID: usageID,
|
||||
endpointID: endpointID,
|
||||
makeDriver: makeDriver,
|
||||
quit: make(chan chan error),
|
||||
}
|
||||
|
@ -133,7 +137,7 @@ func (hub *Hub) refreshWallets() {
|
|||
}
|
||||
for _, info := range hid.Enumerate(hub.vendorID, 0) {
|
||||
for _, id := range hub.productIDs {
|
||||
if info.ProductID == id && info.Interface == 0 {
|
||||
if info.ProductID == id && (info.UsagePage == hub.usageID || info.Interface == hub.endpointID) {
|
||||
devices = append(devices, info)
|
||||
break
|
||||
}
|
||||
|
|
|
@ -1,49 +0,0 @@
|
|||
# Debian Packaging
|
||||
|
||||
Tagged releases and develop branch commits are available as installable Debian packages
|
||||
for Ubuntu. Packages are built for the all Ubuntu versions which are supported by
|
||||
Canonical:
|
||||
|
||||
- Trusty Tahr (14.04 LTS)
|
||||
- Xenial Xerus (16.04 LTS)
|
||||
- Yakkety Yak (16.10)
|
||||
- Zesty Zapus (17.04)
|
||||
|
||||
Packages of develop branch commits have suffix -unstable and cannot be installed alongside
|
||||
the stable version. Switching between release streams requires user intervention.
|
||||
|
||||
The packages are built and served by launchpad.net. We generate a Debian source package
|
||||
for each distribution and upload it. Their builder picks up the source package, builds it
|
||||
and installs the new version into the PPA repository. Launchpad requires a valid signature
|
||||
by a team member for source package uploads. The signing key is stored in an environment
|
||||
variable which Travis CI makes available to certain builds.
|
||||
|
||||
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
|
||||
version that is available in the main Ubuntu repository. In order to make this possible,
|
||||
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
|
||||
golang-1.9, which is co-installable alongside the regular golang package. PPA dependencies
|
||||
can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
|
||||
|
||||
## Building Packages Locally (for testing)
|
||||
|
||||
You need to run Ubuntu to do test packaging.
|
||||
|
||||
Add the gophers PPA and install Go 1.9 and Debian packaging tools:
|
||||
|
||||
$ sudo apt-add-repository ppa:gophers/ubuntu/archive
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install build-essential golang-1.9 devscripts debhelper
|
||||
|
||||
Create the source packages:
|
||||
|
||||
$ go run build/ci.go debsrc -workdir dist
|
||||
|
||||
Then go into the source package directory for your running distribution and build the package:
|
||||
|
||||
$ cd dist/ethereum-unstable-1.6.0+xenial
|
||||
$ dpkg-buildpackage
|
||||
|
||||
Built packages are placed in the dist/ directory.
|
||||
|
||||
$ cd ..
|
||||
$ dpkg-deb -c geth-unstable_1.6.0+xenial_amd64.deb
|
File diff suppressed because it is too large
Load Diff
|
@ -1,5 +0,0 @@
|
|||
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low
|
||||
|
||||
* git build of {{.Env.Commit}}
|
||||
|
||||
-- {{.Author}} {{.Time}}
|
|
@ -1,25 +0,0 @@
|
|||
Source: {{.Name}}
|
||||
Section: science
|
||||
Priority: extra
|
||||
Maintainer: {{.Author}}
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.9
|
||||
Standards-Version: 3.9.5
|
||||
Homepage: https://ethereum.org
|
||||
Vcs-Git: git://github.com/ethereum/go-ethereum.git
|
||||
Vcs-Browser: https://github.com/ethereum/go-ethereum
|
||||
|
||||
Package: {{.Name}}
|
||||
Architecture: any
|
||||
Depends: ${misc:Depends}, {{.ExeList}}
|
||||
Description: Meta-package to install geth and other tools
|
||||
Meta-package to install geth and other tools
|
||||
|
||||
{{range .Executables}}
|
||||
Package: {{$.ExeName .}}
|
||||
Conflicts: {{$.ExeConflicts .}}
|
||||
Architecture: any
|
||||
Depends: ${shlibs:Depends}, ${misc:Depends}
|
||||
Built-Using: ${misc:Built-Using}
|
||||
Description: {{.Description}}
|
||||
{{.Description}}
|
||||
{{end}}
|
|
@ -1,14 +0,0 @@
|
|||
Copyright 2016 The go-ethereum Authors
|
||||
|
||||
go-ethereum is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
go-ethereum is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1 +0,0 @@
|
|||
AUTHORS
|
|
@ -1 +0,0 @@
|
|||
build/bin/{{.Name}} usr/bin
|
|
@ -1,13 +0,0 @@
|
|||
#!/usr/bin/make -f
|
||||
# -*- makefile -*-
|
||||
|
||||
# Uncomment this to turn on verbose mode.
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
override_dh_auto_build:
|
||||
build/env.sh /usr/lib/go-1.9/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
|
||||
override_dh_auto_test:
|
||||
|
||||
%:
|
||||
dh $@
|
|
@ -1,39 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
if [ ! -f "build/env.sh" ]; then
|
||||
echo "$0 must be run from the root of the repository."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Create fake Go workspace if it doesn't exist yet.
|
||||
workspace="$PWD/build/_workspace"
|
||||
root="$PWD"
|
||||
ethdir="$workspace/src/github.com/ethereum"
|
||||
if [ ! -L "$ethdir/go-ethereum" ]; then
|
||||
mkdir -p "$ethdir"
|
||||
cd "$ethdir"
|
||||
ln -s ../../../../../. go-ethereum
|
||||
cd "$root"
|
||||
fi
|
||||
|
||||
# Link status-go lib
|
||||
statusgodir="$workspace/src/github.com/status-im"
|
||||
if [ ! -L "$statusgodir/status-go" ]; then
|
||||
mkdir -p "$statusgodir"
|
||||
cd "$statusgodir"
|
||||
ln -s ../../../../../../../status-im/status-go status-go
|
||||
cd "$root"
|
||||
fi
|
||||
|
||||
# Set up the environment to use the workspace.
|
||||
GOPATH="$workspace"
|
||||
export GOPATH
|
||||
|
||||
# Run the command inside the workspace.
|
||||
cd "$ethdir/go-ethereum"
|
||||
PWD="$ethdir/go-ethereum"
|
||||
|
||||
# Launch the arguments with the configured environment.
|
||||
exec "$@"
|
|
@ -1,57 +0,0 @@
|
|||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
|
||||
http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<groupId>org.ethereum</groupId>
|
||||
<artifactId>geth</artifactId>
|
||||
<version>{{.Version}}</version>
|
||||
<packaging>aar</packaging>
|
||||
|
||||
<name>Android Ethereum Client</name>
|
||||
<description>Android port of the go-ethereum libraries and node</description>
|
||||
<url>https://github.com/ethereum/go-ethereum</url>
|
||||
<inceptionYear>2015</inceptionYear>
|
||||
|
||||
<licenses>
|
||||
<license>
|
||||
<name>GNU Lesser General Public License, Version 3.0</name>
|
||||
<url>https://www.gnu.org/licenses/lgpl-3.0.en.html</url>
|
||||
<distribution>repo</distribution>
|
||||
</license>
|
||||
</licenses>
|
||||
|
||||
<organization>
|
||||
<name>Ethereum</name>
|
||||
<url>https://ethereum.org</url>
|
||||
</organization>
|
||||
|
||||
<developers>
|
||||
<developer>
|
||||
<id>karalabe</id>
|
||||
<name>Péter Szilágyi</name>
|
||||
<email>peterke@gmail.com</email>
|
||||
<url>https://github.com/karalabe</url>
|
||||
<properties>
|
||||
<picUrl>https://www.gravatar.com/avatar/2ecbf0f5b4b79eebf8c193e5d324357f?s=256</picUrl>
|
||||
</properties>
|
||||
</developer>
|
||||
</developers>
|
||||
|
||||
<contributors>{{range .Contributors}}
|
||||
<contributor>
|
||||
<name>{{.Name}}</name>
|
||||
<email>{{.Email}}</email>
|
||||
</contributor>{{end}}
|
||||
</contributors>
|
||||
|
||||
<issueManagement>
|
||||
<system>GitHub Issues</system>
|
||||
<url>https://github.com/ethereum/go-ethereum/issues/</url>
|
||||
</issueManagement>
|
||||
|
||||
<scm>
|
||||
<url>https://github.com/ethereum/go-ethereum</url>
|
||||
</scm>
|
||||
</project>
|
|
@ -1,24 +0,0 @@
|
|||
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
|
||||
http://maven.apache.org/xsd/settings-1.0.0.xsd">
|
||||
<servers>
|
||||
<server>
|
||||
<id>ossrh</id>
|
||||
<username>${env.ANDROID_SONATYPE_USERNAME}</username>
|
||||
<password>${env.ANDROID_SONATYPE_PASSWORD}</password>
|
||||
</server>
|
||||
</servers>
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>ossrh</id>
|
||||
<activation>
|
||||
<activeByDefault>true</activeByDefault>
|
||||
</activation>
|
||||
<properties>
|
||||
<gpg.executable>gpg</gpg.executable>
|
||||
<gpg.passphrase></gpg.passphrase>
|
||||
</properties>
|
||||
</profile>
|
||||
</profiles>
|
||||
</settings>
|
|
@ -1,327 +0,0 @@
|
|||
/**
|
||||
* EnvVarUpdate.nsh
|
||||
* : Environmental Variables: append, prepend, and remove entries
|
||||
*
|
||||
* WARNING: If you use StrFunc.nsh header then include it before this file
|
||||
* with all required definitions. This is to avoid conflicts
|
||||
*
|
||||
* Usage:
|
||||
* ${EnvVarUpdate} "ResultVar" "EnvVarName" "Action" "RegLoc" "PathString"
|
||||
*
|
||||
* Credits:
|
||||
* Version 1.0
|
||||
* * Cal Turney (turnec2)
|
||||
* * Amir Szekely (KiCHiK) and e-circ for developing the forerunners of this
|
||||
* function: AddToPath, un.RemoveFromPath, AddToEnvVar, un.RemoveFromEnvVar,
|
||||
* WriteEnvStr, and un.DeleteEnvStr
|
||||
* * Diego Pedroso (deguix) for StrTok
|
||||
* * Kevin English (kenglish_hi) for StrContains
|
||||
* * Hendri Adriaens (Smile2Me), Diego Pedroso (deguix), and Dan Fuhry
|
||||
* (dandaman32) for StrReplace
|
||||
*
|
||||
* Version 1.1 (compatibility with StrFunc.nsh)
|
||||
* * techtonik
|
||||
*
|
||||
* http://nsis.sourceforge.net/Environmental_Variables:_append%2C_prepend%2C_and_remove_entries
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
!ifndef ENVVARUPDATE_FUNCTION
|
||||
!define ENVVARUPDATE_FUNCTION
|
||||
!verbose push
|
||||
!verbose 3
|
||||
!include "LogicLib.nsh"
|
||||
!include "WinMessages.NSH"
|
||||
!include "StrFunc.nsh"
|
||||
|
||||
; ---- Fix for conflict if StrFunc.nsh is already includes in main file -----------------------
|
||||
!macro _IncludeStrFunction StrFuncName
|
||||
!ifndef ${StrFuncName}_INCLUDED
|
||||
${${StrFuncName}}
|
||||
!endif
|
||||
!ifndef Un${StrFuncName}_INCLUDED
|
||||
${Un${StrFuncName}}
|
||||
!endif
|
||||
!define un.${StrFuncName} "${Un${StrFuncName}}"
|
||||
!macroend
|
||||
|
||||
!insertmacro _IncludeStrFunction StrTok
|
||||
!insertmacro _IncludeStrFunction StrStr
|
||||
!insertmacro _IncludeStrFunction StrRep
|
||||
|
||||
; ---------------------------------- Macro Definitions ----------------------------------------
|
||||
!macro _EnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString
|
||||
Push "${EnvVarName}"
|
||||
Push "${Action}"
|
||||
Push "${RegLoc}"
|
||||
Push "${PathString}"
|
||||
Call EnvVarUpdate
|
||||
Pop "${ResultVar}"
|
||||
!macroend
|
||||
!define EnvVarUpdate '!insertmacro "_EnvVarUpdateConstructor"'
|
||||
|
||||
!macro _unEnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString
|
||||
Push "${EnvVarName}"
|
||||
Push "${Action}"
|
||||
Push "${RegLoc}"
|
||||
Push "${PathString}"
|
||||
Call un.EnvVarUpdate
|
||||
Pop "${ResultVar}"
|
||||
!macroend
|
||||
!define un.EnvVarUpdate '!insertmacro "_unEnvVarUpdateConstructor"'
|
||||
; ---------------------------------- Macro Definitions end-------------------------------------
|
||||
|
||||
;----------------------------------- EnvVarUpdate start----------------------------------------
|
||||
!define hklm_all_users 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
|
||||
!define hkcu_current_user 'HKCU "Environment"'
|
||||
|
||||
!macro EnvVarUpdate UN
|
||||
|
||||
Function ${UN}EnvVarUpdate
|
||||
|
||||
Push $0
|
||||
Exch 4
|
||||
Exch $1
|
||||
Exch 3
|
||||
Exch $2
|
||||
Exch 2
|
||||
Exch $3
|
||||
Exch
|
||||
Exch $4
|
||||
Push $5
|
||||
Push $6
|
||||
Push $7
|
||||
Push $8
|
||||
Push $9
|
||||
Push $R0
|
||||
|
||||
/* After this point:
|
||||
-------------------------
|
||||
$0 = ResultVar (returned)
|
||||
$1 = EnvVarName (input)
|
||||
$2 = Action (input)
|
||||
$3 = RegLoc (input)
|
||||
$4 = PathString (input)
|
||||
$5 = Orig EnvVar (read from registry)
|
||||
$6 = Len of $0 (temp)
|
||||
$7 = tempstr1 (temp)
|
||||
$8 = Entry counter (temp)
|
||||
$9 = tempstr2 (temp)
|
||||
$R0 = tempChar (temp) */
|
||||
|
||||
; Step 1: Read contents of EnvVarName from RegLoc
|
||||
;
|
||||
; Check for empty EnvVarName
|
||||
${If} $1 == ""
|
||||
SetErrors
|
||||
DetailPrint "ERROR: EnvVarName is blank"
|
||||
Goto EnvVarUpdate_Restore_Vars
|
||||
${EndIf}
|
||||
|
||||
; Check for valid Action
|
||||
${If} $2 != "A"
|
||||
${AndIf} $2 != "P"
|
||||
${AndIf} $2 != "R"
|
||||
SetErrors
|
||||
DetailPrint "ERROR: Invalid Action - must be A, P, or R"
|
||||
Goto EnvVarUpdate_Restore_Vars
|
||||
${EndIf}
|
||||
|
||||
${If} $3 == HKLM
|
||||
ReadRegStr $5 ${hklm_all_users} $1 ; Get EnvVarName from all users into $5
|
||||
${ElseIf} $3 == HKCU
|
||||
ReadRegStr $5 ${hkcu_current_user} $1 ; Read EnvVarName from current user into $5
|
||||
${Else}
|
||||
SetErrors
|
||||
DetailPrint 'ERROR: Action is [$3] but must be "HKLM" or HKCU"'
|
||||
Goto EnvVarUpdate_Restore_Vars
|
||||
${EndIf}
|
||||
|
||||
; Check for empty PathString
|
||||
${If} $4 == ""
|
||||
SetErrors
|
||||
DetailPrint "ERROR: PathString is blank"
|
||||
Goto EnvVarUpdate_Restore_Vars
|
||||
${EndIf}
|
||||
|
||||
; Make sure we've got some work to do
|
||||
${If} $5 == ""
|
||||
${AndIf} $2 == "R"
|
||||
SetErrors
|
||||
DetailPrint "$1 is empty - Nothing to remove"
|
||||
Goto EnvVarUpdate_Restore_Vars
|
||||
${EndIf}
|
||||
|
||||
; Step 2: Scrub EnvVar
|
||||
;
|
||||
StrCpy $0 $5 ; Copy the contents to $0
|
||||
; Remove spaces around semicolons (NOTE: spaces before the 1st entry or
|
||||
; after the last one are not removed here but instead in Step 3)
|
||||
${If} $0 != "" ; If EnvVar is not empty ...
|
||||
${Do}
|
||||
${${UN}StrStr} $7 $0 " ;"
|
||||
${If} $7 == ""
|
||||
${ExitDo}
|
||||
${EndIf}
|
||||
${${UN}StrRep} $0 $0 " ;" ";" ; Remove '<space>;'
|
||||
${Loop}
|
||||
${Do}
|
||||
${${UN}StrStr} $7 $0 "; "
|
||||
${If} $7 == ""
|
||||
${ExitDo}
|
||||
${EndIf}
|
||||
${${UN}StrRep} $0 $0 "; " ";" ; Remove ';<space>'
|
||||
${Loop}
|
||||
${Do}
|
||||
${${UN}StrStr} $7 $0 ";;"
|
||||
${If} $7 == ""
|
||||
${ExitDo}
|
||||
${EndIf}
|
||||
${${UN}StrRep} $0 $0 ";;" ";"
|
||||
${Loop}
|
||||
|
||||
; Remove a leading or trailing semicolon from EnvVar
|
||||
StrCpy $7 $0 1 0
|
||||
${If} $7 == ";"
|
||||
StrCpy $0 $0 "" 1 ; Change ';<EnvVar>' to '<EnvVar>'
|
||||
${EndIf}
|
||||
StrLen $6 $0
|
||||
IntOp $6 $6 - 1
|
||||
StrCpy $7 $0 1 $6
|
||||
${If} $7 == ";"
|
||||
StrCpy $0 $0 $6 ; Change ';<EnvVar>' to '<EnvVar>'
|
||||
${EndIf}
|
||||
; DetailPrint "Scrubbed $1: [$0]" ; Uncomment to debug
|
||||
${EndIf}
|
||||
|
||||
/* Step 3. Remove all instances of the target path/string (even if "A" or "P")
|
||||
$6 = bool flag (1 = found and removed PathString)
|
||||
$7 = a string (e.g. path) delimited by semicolon(s)
|
||||
$8 = entry counter starting at 0
|
||||
$9 = copy of $0
|
||||
$R0 = tempChar */
|
||||
|
||||
${If} $5 != "" ; If EnvVar is not empty ...
|
||||
StrCpy $9 $0
|
||||
StrCpy $0 ""
|
||||
StrCpy $8 0
|
||||
StrCpy $6 0
|
||||
|
||||
${Do}
|
||||
${${UN}StrTok} $7 $9 ";" $8 "0" ; $7 = next entry, $8 = entry counter
|
||||
|
||||
${If} $7 == "" ; If we've run out of entries,
|
||||
${ExitDo} ; were done
|
||||
${EndIf} ;
|
||||
|
||||
; Remove leading and trailing spaces from this entry (critical step for Action=Remove)
|
||||
${Do}
|
||||
StrCpy $R0 $7 1
|
||||
${If} $R0 != " "
|
||||
${ExitDo}
|
||||
${EndIf}
|
||||
StrCpy $7 $7 "" 1 ; Remove leading space
|
||||
${Loop}
|
||||
${Do}
|
||||
StrCpy $R0 $7 1 -1
|
||||
${If} $R0 != " "
|
||||
${ExitDo}
|
||||
${EndIf}
|
||||
StrCpy $7 $7 -1 ; Remove trailing space
|
||||
${Loop}
|
||||
${If} $7 == $4 ; If string matches, remove it by not appending it
|
||||
StrCpy $6 1 ; Set 'found' flag
|
||||
${ElseIf} $7 != $4 ; If string does NOT match
|
||||
${AndIf} $0 == "" ; and the 1st string being added to $0,
|
||||
StrCpy $0 $7 ; copy it to $0 without a prepended semicolon
|
||||
${ElseIf} $7 != $4 ; If string does NOT match
|
||||
${AndIf} $0 != "" ; and this is NOT the 1st string to be added to $0,
|
||||
StrCpy $0 $0;$7 ; append path to $0 with a prepended semicolon
|
||||
${EndIf} ;
|
||||
|
||||
IntOp $8 $8 + 1 ; Bump counter
|
||||
${Loop} ; Check for duplicates until we run out of paths
|
||||
${EndIf}
|
||||
|
||||
; Step 4: Perform the requested Action
|
||||
;
|
||||
${If} $2 != "R" ; If Append or Prepend
|
||||
${If} $6 == 1 ; And if we found the target
|
||||
DetailPrint "Target is already present in $1. It will be removed and"
|
||||
${EndIf}
|
||||
${If} $0 == "" ; If EnvVar is (now) empty
|
||||
StrCpy $0 $4 ; just copy PathString to EnvVar
|
||||
${If} $6 == 0 ; If found flag is either 0
|
||||
${OrIf} $6 == "" ; or blank (if EnvVarName is empty)
|
||||
DetailPrint "$1 was empty and has been updated with the target"
|
||||
${EndIf}
|
||||
${ElseIf} $2 == "A" ; If Append (and EnvVar is not empty),
|
||||
StrCpy $0 $0;$4 ; append PathString
|
||||
${If} $6 == 1
|
||||
DetailPrint "appended to $1"
|
||||
${Else}
|
||||
DetailPrint "Target was appended to $1"
|
||||
${EndIf}
|
||||
${Else} ; If Prepend (and EnvVar is not empty),
|
||||
StrCpy $0 $4;$0 ; prepend PathString
|
||||
${If} $6 == 1
|
||||
DetailPrint "prepended to $1"
|
||||
${Else}
|
||||
DetailPrint "Target was prepended to $1"
|
||||
${EndIf}
|
||||
${EndIf}
|
||||
${Else} ; If Action = Remove
|
||||
${If} $6 == 1 ; and we found the target
|
||||
DetailPrint "Target was found and removed from $1"
|
||||
${Else}
|
||||
DetailPrint "Target was NOT found in $1 (nothing to remove)"
|
||||
${EndIf}
|
||||
${If} $0 == ""
|
||||
DetailPrint "$1 is now empty"
|
||||
${EndIf}
|
||||
${EndIf}
|
||||
|
||||
; Step 5: Update the registry at RegLoc with the updated EnvVar and announce the change
|
||||
;
|
||||
ClearErrors
|
||||
${If} $3 == HKLM
|
||||
WriteRegExpandStr ${hklm_all_users} $1 $0 ; Write it in all users section
|
||||
${ElseIf} $3 == HKCU
|
||||
WriteRegExpandStr ${hkcu_current_user} $1 $0 ; Write it to current user section
|
||||
${EndIf}
|
||||
|
||||
IfErrors 0 +4
|
||||
MessageBox MB_OK|MB_ICONEXCLAMATION "Could not write updated $1 to $3"
|
||||
DetailPrint "Could not write updated $1 to $3"
|
||||
Goto EnvVarUpdate_Restore_Vars
|
||||
|
||||
; "Export" our change
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
EnvVarUpdate_Restore_Vars:
|
||||
;
|
||||
; Restore the user's variables and return ResultVar
|
||||
Pop $R0
|
||||
Pop $9
|
||||
Pop $8
|
||||
Pop $7
|
||||
Pop $6
|
||||
Pop $5
|
||||
Pop $4
|
||||
Pop $3
|
||||
Pop $2
|
||||
Pop $1
|
||||
Push $0 ; Push my $0 (ResultVar)
|
||||
Exch
|
||||
Pop $0 ; Restore his $0
|
||||
|
||||
FunctionEnd
|
||||
|
||||
!macroend ; EnvVarUpdate UN
|
||||
!insertmacro EnvVarUpdate ""
|
||||
!insertmacro EnvVarUpdate "un."
|
||||
;----------------------------------- EnvVarUpdate end----------------------------------------
|
||||
|
||||
!verbose pop
|
||||
!endif
|
|
@ -1,70 +0,0 @@
|
|||
# Builds a Windows installer with NSIS.
|
||||
# It expects the following command line arguments:
|
||||
# - OUTPUTFILE, filename of the installer (without extension)
|
||||
# - MAJORVERSION, major build version
|
||||
# - MINORVERSION, minor build version
|
||||
# - BUILDVERSION, build id version
|
||||
#
|
||||
# The created installer executes the following steps:
|
||||
# 1. install geth for all users
|
||||
# 2. install optional development tools such as abigen
|
||||
# 3. create an uninstaller
|
||||
# 4. configures the Windows firewall for geth
|
||||
# 5. create geth, attach and uninstall start menu entries
|
||||
# 6. configures the registry that allows Windows to manage the package through its platform tools
|
||||
# 7. adds the environment system wide variable ETHEREUM_SOCKET
|
||||
# 8. adds the install directory to %PATH%
|
||||
#
|
||||
# Requirements:
|
||||
# - NSIS, http://nsis.sourceforge.net/Main_Page
|
||||
# - NSIS Large Strings build, http://nsis.sourceforge.net/Special_Builds
|
||||
# - SFP, http://nsis.sourceforge.net/NSIS_Simple_Firewall_Plugin (put dll in NSIS\Plugins\x86-ansi)
|
||||
#
|
||||
# After intalling NSIS extra the NSIS Large Strings build zip and replace the makensis.exe and the
|
||||
# files found in Stub.
|
||||
#
|
||||
# based on: http://nsis.sourceforge.net/A_simple_installer_with_start_menu_shortcut_and_uninstaller
|
||||
#
|
||||
# TODO:
|
||||
# - sign installer
|
||||
CRCCheck on
|
||||
|
||||
!define GROUPNAME "Ethereum"
|
||||
!define APPNAME "Geth"
|
||||
!define DESCRIPTION "Official Go implementation of the Ethereum protocol"
|
||||
!addplugindir .\
|
||||
|
||||
# Require admin rights on NT6+ (When UAC is turned on)
|
||||
RequestExecutionLevel admin
|
||||
|
||||
# Use LZMA compression
|
||||
SetCompressor /SOLID lzma
|
||||
|
||||
!include LogicLib.nsh
|
||||
!include PathUpdate.nsh
|
||||
!include EnvVarUpdate.nsh
|
||||
|
||||
!macro VerifyUserIsAdmin
|
||||
UserInfo::GetAccountType
|
||||
pop $0
|
||||
${If} $0 != "admin" # Require admin rights on NT4+
|
||||
messageBox mb_iconstop "Administrator rights required!"
|
||||
setErrorLevel 740 # ERROR_ELEVATION_REQUIRED
|
||||
quit
|
||||
${EndIf}
|
||||
!macroend
|
||||
|
||||
function .onInit
|
||||
# make vars are global for all users since geth is installed global
|
||||
setShellVarContext all
|
||||
!insertmacro VerifyUserIsAdmin
|
||||
|
||||
${If} ${ARCH} == "amd64"
|
||||
StrCpy $InstDir "$PROGRAMFILES64\${APPNAME}"
|
||||
${Else}
|
||||
StrCpy $InstDir "$PROGRAMFILES32\${APPNAME}"
|
||||
${Endif}
|
||||
functionEnd
|
||||
|
||||
!include install.nsh
|
||||
!include uninstall.nsh
|
|
@ -1,103 +0,0 @@
|
|||
Name "geth ${MAJORVERSION}.${MINORVERSION}.${BUILDVERSION}" # VERSION variables set through command line arguments
|
||||
InstallDir "$InstDir"
|
||||
OutFile "${OUTPUTFILE}" # set through command line arguments
|
||||
|
||||
# Links for "Add/Remove Programs"
|
||||
!define HELPURL "https://github.com/ethereum/go-ethereum/issues"
|
||||
!define UPDATEURL "https://github.com/ethereum/go-ethereum/releases"
|
||||
!define ABOUTURL "https://github.com/ethereum/go-ethereum#ethereum-go"
|
||||
!define /date NOW "%Y%m%d"
|
||||
|
||||
PageEx license
|
||||
LicenseData {{.License}}
|
||||
PageExEnd
|
||||
|
||||
# Install geth binary
|
||||
Section "Geth" GETH_IDX
|
||||
SetOutPath $INSTDIR
|
||||
file {{.Geth}}
|
||||
|
||||
# Create start menu launcher
|
||||
createDirectory "$SMPROGRAMS\${APPNAME}"
|
||||
createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe" "--fast" "--cache=512"
|
||||
createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach" "" ""
|
||||
createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe" "" "" ""
|
||||
|
||||
# Firewall - remove rules (if exists)
|
||||
SimpleFC::AdvRemoveRule "Geth incoming peers (TCP:30303)"
|
||||
SimpleFC::AdvRemoveRule "Geth outgoing peers (TCP:30303)"
|
||||
SimpleFC::AdvRemoveRule "Geth UDP discovery (UDP:30303)"
|
||||
|
||||
# Firewall - add rules
|
||||
SimpleFC::AdvAddRule "Geth incoming peers (TCP:30303)" "" 6 1 1 2147483647 1 "$INSTDIR\geth.exe" "" "" "Ethereum" 30303 "" "" ""
|
||||
SimpleFC::AdvAddRule "Geth outgoing peers (TCP:30303)" "" 6 2 1 2147483647 1 "$INSTDIR\geth.exe" "" "" "Ethereum" "" 30303 "" ""
|
||||
SimpleFC::AdvAddRule "Geth UDP discovery (UDP:30303)" "" 17 2 1 2147483647 1 "$INSTDIR\geth.exe" "" "" "Ethereum" "" 30303 "" ""
|
||||
|
||||
# Set default IPC endpoint (https://github.com/ethereum/EIPs/issues/147)
|
||||
${EnvVarUpdate} $0 "ETHEREUM_SOCKET" "R" "HKLM" "\\.\pipe\geth.ipc"
|
||||
${EnvVarUpdate} $0 "ETHEREUM_SOCKET" "A" "HKLM" "\\.\pipe\geth.ipc"
|
||||
|
||||
# Add instdir to PATH
|
||||
Push "$INSTDIR"
|
||||
Call AddToPath
|
||||
SectionEnd
|
||||
|
||||
# Install optional develop tools.
|
||||
Section /o "Development tools" DEV_TOOLS_IDX
|
||||
SetOutPath $INSTDIR
|
||||
{{range .DevTools}}file {{.}}
|
||||
{{end}}
|
||||
SectionEnd
|
||||
|
||||
# Return on top of stack the total size (as DWORD) of the selected/installed sections.
|
||||
Var GetInstalledSize.total
|
||||
Function GetInstalledSize
|
||||
StrCpy $GetInstalledSize.total 0
|
||||
|
||||
${if} ${SectionIsSelected} ${GETH_IDX}
|
||||
SectionGetSize ${GETH_IDX} $0
|
||||
IntOp $GetInstalledSize.total $GetInstalledSize.total + $0
|
||||
${endif}
|
||||
|
||||
${if} ${SectionIsSelected} ${DEV_TOOLS_IDX}
|
||||
SectionGetSize ${DEV_TOOLS_IDX} $0
|
||||
IntOp $GetInstalledSize.total $GetInstalledSize.total + $0
|
||||
${endif}
|
||||
|
||||
IntFmt $GetInstalledSize.total "0x%08X" $GetInstalledSize.total
|
||||
Push $GetInstalledSize.total
|
||||
FunctionEnd
|
||||
|
||||
# Write registry, Windows uses these values in various tools such as add/remove program.
|
||||
# PowerShell: Get-ItemProperty HKLM:\Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall\* | Select-Object DisplayName, InstallLocation, InstallDate | Format-Table –AutoSize
|
||||
function .onInstSuccess
|
||||
# Save information in registry in HKEY_LOCAL_MACHINE branch, Windows add/remove functionality depends on this
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "DisplayName" "${GROUPNAME} - ${APPNAME} - ${DESCRIPTION}"
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "UninstallString" "$\"$INSTDIR\uninstall.exe$\""
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "QuietUninstallString" "$\"$INSTDIR\uninstall.exe$\" /S"
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "InstallLocation" "$INSTDIR"
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "InstallDate" "${NOW}"
|
||||
# Wait for Alex
|
||||
#WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "DisplayIcon" "$\"$INSTDIR\logo.ico$\""
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "Publisher" "${GROUPNAME}"
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "HelpLink" "${HELPURL}"
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "URLUpdateInfo" "${UPDATEURL}"
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "URLInfoAbout" "${ABOUTURL}"
|
||||
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "DisplayVersion" "${MAJORVERSION}.${MINORVERSION}.${BUILDVERSION}"
|
||||
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "VersionMajor" ${MAJORVERSION}
|
||||
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "VersionMinor" ${MINORVERSION}
|
||||
# There is no option for modifying or repairing the install
|
||||
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "NoModify" 1
|
||||
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "NoRepair" 1
|
||||
|
||||
Call GetInstalledSize
|
||||
Pop $0
|
||||
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "EstimatedSize" "$0"
|
||||
|
||||
# Create uninstaller
|
||||
writeUninstaller "$INSTDIR\uninstall.exe"
|
||||
functionEnd
|
||||
|
||||
Page components
|
||||
Page directory
|
||||
Page instfiles
|
|
@ -1,153 +0,0 @@
|
|||
!include "WinMessages.nsh"
|
||||
|
||||
; see https://support.microsoft.com/en-us/kb/104011
|
||||
!define Environ 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
|
||||
; HKEY_LOCAL_MACHINE = 0x80000002
|
||||
|
||||
; AddToPath - Appends dir to PATH
|
||||
; (does not work on Win9x/ME)
|
||||
;
|
||||
; Usage:
|
||||
; Push "dir"
|
||||
; Call AddToPath
|
||||
Function AddToPath
|
||||
Exch $0
|
||||
Push $1
|
||||
Push $2
|
||||
Push $3
|
||||
Push $4
|
||||
|
||||
; NSIS ReadRegStr returns empty string on string overflow
|
||||
; Native calls are used here to check actual length of PATH
|
||||
; $4 = RegOpenKey(HKEY_LOCAL_MACHINE, "SYSTEM\CurrentControlSet\Control\Session Manager\Environment", &$3)
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; $4 = RegQueryValueEx($3, "PATH", (DWORD*)0, (DWORD*)0, &$1, ($2=NSIS_MAX_STRLEN, &$2))
|
||||
; RegCloseKey($3)
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
DetailPrint "AddToPath: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK "PATH not updated, original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
Goto done
|
||||
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "AddToPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; Check if already in PATH
|
||||
Push "$1;"
|
||||
Push "$0;"
|
||||
Call StrStr
|
||||
Pop $2
|
||||
StrCmp $2 "" 0 done
|
||||
Push "$1;"
|
||||
Push "$0\;"
|
||||
Call StrStr
|
||||
Pop $2
|
||||
StrCmp $2 "" 0 done
|
||||
|
||||
; Prevent NSIS string overflow
|
||||
StrLen $2 $0
|
||||
StrLen $3 $1
|
||||
IntOp $2 $2 + $3
|
||||
IntOp $2 $2 + 2 ; $2 = strlen(dir) + strlen(PATH) + sizeof(";")
|
||||
IntCmp $2 ${NSIS_MAX_STRLEN} +4 +4 0
|
||||
DetailPrint "AddToPath: new length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK "PATH not updated, new length $2 > ${NSIS_MAX_STRLEN}."
|
||||
Goto done
|
||||
|
||||
; Append dir to PATH
|
||||
DetailPrint "Add to PATH: $0"
|
||||
StrCpy $2 $1 1 -1
|
||||
StrCmp $2 ";" 0 +2
|
||||
StrCpy $1 $1 -1 ; remove trailing ';'
|
||||
StrCmp $1 "" +2 ; no leading ';'
|
||||
StrCpy $0 "$1;$0"
|
||||
|
||||
WriteRegExpandStr ${Environ} "PATH" $0
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
Pop $4
|
||||
Pop $3
|
||||
Pop $2
|
||||
Pop $1
|
||||
Pop $0
|
||||
FunctionEnd
|
||||
|
||||
|
||||
; RemoveFromPath - Removes dir from PATH
|
||||
;
|
||||
; Usage:
|
||||
; Push "dir"
|
||||
; Call RemoveFromPath
|
||||
Function un.RemoveFromPath
|
||||
Exch $0
|
||||
Push $1
|
||||
Push $2
|
||||
Push $3
|
||||
Push $4
|
||||
Push $5
|
||||
Push $6
|
||||
|
||||
; NSIS ReadRegStr returns empty string on string overflow
|
||||
; Native calls are used here to check actual length of PATH
|
||||
; $4 = RegOpenKey(HKEY_LOCAL_MACHINE, "SYSTEM\CurrentControlSet\Control\Session Manager\Environment", &$3)
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; $4 = RegQueryValueEx($3, "PATH", (DWORD*)0, (DWORD*)0, &$1, ($2=NSIS_MAX_STRLEN, &$2))
|
||||
; RegCloseKey($3)
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
DetailPrint "RemoveFromPath: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK "PATH not updated, original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
Goto done
|
||||
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "RemoveFromPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; length < ${NSIS_MAX_STRLEN} -> ReadRegStr can be used
|
||||
ReadRegStr $1 ${Environ} "PATH"
|
||||
StrCpy $5 $1 1 -1
|
||||
StrCmp $5 ";" +2
|
||||
StrCpy $1 "$1;" ; ensure trailing ';'
|
||||
Push $1
|
||||
Push "$0;"
|
||||
Call un.StrStr
|
||||
Pop $2 ; pos of our dir
|
||||
StrCmp $2 "" done
|
||||
|
||||
DetailPrint "Remove from PATH: $0"
|
||||
StrLen $3 "$0;"
|
||||
StrLen $4 $2
|
||||
StrCpy $5 $1 -$4 ; $5 is now the part before the path to remove
|
||||
StrCpy $6 $2 "" $3 ; $6 is now the part after the path to remove
|
||||
StrCpy $3 "$5$6"
|
||||
StrCpy $5 $3 1 -1
|
||||
StrCmp $5 ";" 0 +2
|
||||
StrCpy $3 $3 -1 ; remove trailing ';'
|
||||
WriteRegExpandStr ${Environ} "PATH" $3
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
Pop $6
|
||||
Pop $5
|
||||
Pop $4
|
||||
Pop $3
|
||||
Pop $2
|
||||
Pop $1
|
||||
Pop $0
|
||||
FunctionEnd
|
||||
|
||||
|
Binary file not shown.
Binary file not shown.
|
@ -1,33 +0,0 @@
|
|||
Section "Uninstall"
|
||||
# uninstall for all users
|
||||
setShellVarContext all
|
||||
|
||||
# Delete (optionally) installed files
|
||||
{{range $}}Delete $INSTDIR\{{.}}
|
||||
{{end}}
|
||||
Delete $INSTDIR\uninstall.exe
|
||||
|
||||
# Delete install directory
|
||||
rmDir $INSTDIR
|
||||
|
||||
# Delete start menu launcher
|
||||
Delete "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk"
|
||||
Delete "$SMPROGRAMS\${APPNAME}\Attach.lnk"
|
||||
Delete "$SMPROGRAMS\${APPNAME}\Uninstall.lnk"
|
||||
rmDir "$SMPROGRAMS\${APPNAME}"
|
||||
|
||||
# Firewall - remove rules if exists
|
||||
SimpleFC::AdvRemoveRule "Geth incoming peers (TCP:30303)"
|
||||
SimpleFC::AdvRemoveRule "Geth outgoing peers (TCP:30303)"
|
||||
SimpleFC::AdvRemoveRule "Geth UDP discovery (UDP:30303)"
|
||||
|
||||
# Remove IPC endpoint (https://github.com/ethereum/EIPs/issues/147)
|
||||
${un.EnvVarUpdate} $0 "ETHEREUM_SOCKET" "R" "HKLM" "\\.\pipe\geth.ipc"
|
||||
|
||||
# Remove install directory from PATH
|
||||
Push "$INSTDIR"
|
||||
Call un.RemoveFromPath
|
||||
|
||||
# Cleanup registry (deletes all sub keys)
|
||||
DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}"
|
||||
SectionEnd
|
|
@ -1,22 +0,0 @@
|
|||
Pod::Spec.new do |spec|
|
||||
spec.name = 'Geth'
|
||||
spec.version = '{{.Version}}'
|
||||
spec.license = { :type => 'GNU Lesser General Public License, Version 3.0' }
|
||||
spec.homepage = 'https://github.com/ethereum/go-ethereum'
|
||||
spec.authors = { {{range .Contributors}}
|
||||
'{{.Name}}' => '{{.Email}}',{{end}}
|
||||
}
|
||||
spec.summary = 'iOS Ethereum Client'
|
||||
spec.source = { :git => 'https://github.com/ethereum/go-ethereum.git', :commit => '{{.Commit}}' }
|
||||
|
||||
spec.platform = :ios
|
||||
spec.ios.deployment_target = '9.0'
|
||||
spec.ios.vendored_frameworks = 'Frameworks/Geth.framework'
|
||||
|
||||
spec.prepare_command = <<-CMD
|
||||
curl https://gethstore.blob.core.windows.net/builds/{{.Archive}}.tar.gz | tar -xvz
|
||||
mkdir Frameworks
|
||||
mv {{.Archive}}/Geth.framework Frameworks
|
||||
rm -rf {{.Archive}}
|
||||
CMD
|
||||
end
|
|
@ -1,396 +0,0 @@
|
|||
// +build none
|
||||
|
||||
/*
|
||||
This command generates GPL license headers on top of all source files.
|
||||
You can run it once per month, before cutting a release or just
|
||||
whenever you feel like it.
|
||||
|
||||
go run update-license.go
|
||||
|
||||
All authors (people who have contributed code) are listed in the
|
||||
AUTHORS file. The author names are mapped and deduplicated using the
|
||||
.mailmap file. You can use .mailmap to set the canonical name and
|
||||
address for each author. See git-shortlog(1) for an explanation of the
|
||||
.mailmap format.
|
||||
|
||||
Please review the resulting diff to check whether the correct
|
||||
copyright assignments are performed.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/template"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// only files with these extensions will be considered
|
||||
extensions = []string{".go", ".js", ".qml"}
|
||||
|
||||
// paths with any of these prefixes will be skipped
|
||||
skipPrefixes = []string{
|
||||
// boring stuff
|
||||
"vendor/", "tests/testdata/", "build/",
|
||||
// don't relicense vendored sources
|
||||
"cmd/internal/browser",
|
||||
"consensus/ethash/xor.go",
|
||||
"crypto/bn256/",
|
||||
"crypto/ecies/",
|
||||
"crypto/secp256k1/curve.go",
|
||||
"crypto/sha3/",
|
||||
"internal/jsre/deps",
|
||||
"log/",
|
||||
// don't license generated files
|
||||
"contracts/chequebook/contract/",
|
||||
"contracts/ens/contract/",
|
||||
"contracts/release/contract.go",
|
||||
}
|
||||
|
||||
// paths with this prefix are licensed as GPL. all other files are LGPL.
|
||||
gplPrefixes = []string{"cmd/"}
|
||||
|
||||
// this regexp must match the entire license comment at the
|
||||
// beginning of each file.
|
||||
licenseCommentRE = regexp.MustCompile(`^//\s*(Copyright|This file is part of).*?\n(?://.*?\n)*\n*`)
|
||||
|
||||
// this text appears at the start of AUTHORS
|
||||
authorsFileHeader = "# This is the official list of go-ethereum authors for copyright purposes.\n\n"
|
||||
)
|
||||
|
||||
// this template generates the license comment.
|
||||
// its input is an info structure.
|
||||
var licenseT = template.Must(template.New("").Parse(`
|
||||
// Copyright {{.Year}} The go-ethereum Authors
|
||||
// This file is part of {{.Whole false}}.
|
||||
//
|
||||
// {{.Whole true}} is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU {{.License}} as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// {{.Whole true}} is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU {{.License}} for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU {{.License}}
|
||||
// along with {{.Whole false}}. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
`[1:]))
|
||||
|
||||
type info struct {
|
||||
file string
|
||||
Year int64
|
||||
}
|
||||
|
||||
func (i info) License() string {
|
||||
if i.gpl() {
|
||||
return "General Public License"
|
||||
}
|
||||
return "Lesser General Public License"
|
||||
}
|
||||
|
||||
func (i info) ShortLicense() string {
|
||||
if i.gpl() {
|
||||
return "GPL"
|
||||
}
|
||||
return "LGPL"
|
||||
}
|
||||
|
||||
func (i info) Whole(startOfSentence bool) string {
|
||||
if i.gpl() {
|
||||
return "go-ethereum"
|
||||
}
|
||||
if startOfSentence {
|
||||
return "The go-ethereum library"
|
||||
}
|
||||
return "the go-ethereum library"
|
||||
}
|
||||
|
||||
func (i info) gpl() bool {
|
||||
for _, p := range gplPrefixes {
|
||||
if strings.HasPrefix(i.file, p) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func main() {
|
||||
var (
|
||||
files = getFiles()
|
||||
filec = make(chan string)
|
||||
infoc = make(chan *info, 20)
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
|
||||
writeAuthors(files)
|
||||
|
||||
go func() {
|
||||
for _, f := range files {
|
||||
filec <- f
|
||||
}
|
||||
close(filec)
|
||||
}()
|
||||
for i := runtime.NumCPU(); i >= 0; i-- {
|
||||
// getting file info is slow and needs to be parallel.
|
||||
// it traverses git history for each file.
|
||||
wg.Add(1)
|
||||
go getInfo(filec, infoc, &wg)
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(infoc)
|
||||
}()
|
||||
writeLicenses(infoc)
|
||||
}
|
||||
|
||||
func skipFile(path string) bool {
|
||||
if strings.Contains(path, "/testdata/") {
|
||||
return true
|
||||
}
|
||||
for _, p := range skipPrefixes {
|
||||
if strings.HasPrefix(path, p) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getFiles() []string {
|
||||
cmd := exec.Command("git", "ls-tree", "-r", "--name-only", "HEAD")
|
||||
var files []string
|
||||
err := doLines(cmd, func(line string) {
|
||||
if skipFile(line) {
|
||||
return
|
||||
}
|
||||
ext := filepath.Ext(line)
|
||||
for _, wantExt := range extensions {
|
||||
if ext == wantExt {
|
||||
goto keep
|
||||
}
|
||||
}
|
||||
return
|
||||
keep:
|
||||
files = append(files, line)
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal("error getting files:", err)
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
var authorRegexp = regexp.MustCompile(`\s*[0-9]+\s*(.*)`)
|
||||
|
||||
func gitAuthors(files []string) []string {
|
||||
cmds := []string{"shortlog", "-s", "-n", "-e", "HEAD", "--"}
|
||||
cmds = append(cmds, files...)
|
||||
cmd := exec.Command("git", cmds...)
|
||||
var authors []string
|
||||
err := doLines(cmd, func(line string) {
|
||||
m := authorRegexp.FindStringSubmatch(line)
|
||||
if len(m) > 1 {
|
||||
authors = append(authors, m[1])
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalln("error getting authors:", err)
|
||||
}
|
||||
return authors
|
||||
}
|
||||
|
||||
func readAuthors() []string {
|
||||
content, err := ioutil.ReadFile("AUTHORS")
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
log.Fatalln("error reading AUTHORS:", err)
|
||||
}
|
||||
var authors []string
|
||||
for _, a := range bytes.Split(content, []byte("\n")) {
|
||||
if len(a) > 0 && a[0] != '#' {
|
||||
authors = append(authors, string(a))
|
||||
}
|
||||
}
|
||||
// Retranslate existing authors through .mailmap.
|
||||
// This should catch email address changes.
|
||||
authors = mailmapLookup(authors)
|
||||
return authors
|
||||
}
|
||||
|
||||
func mailmapLookup(authors []string) []string {
|
||||
if len(authors) == 0 {
|
||||
return nil
|
||||
}
|
||||
cmds := []string{"check-mailmap", "--"}
|
||||
cmds = append(cmds, authors...)
|
||||
cmd := exec.Command("git", cmds...)
|
||||
var translated []string
|
||||
err := doLines(cmd, func(line string) {
|
||||
translated = append(translated, line)
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalln("error translating authors:", err)
|
||||
}
|
||||
return translated
|
||||
}
|
||||
|
||||
func writeAuthors(files []string) {
|
||||
merge := make(map[string]bool)
|
||||
// Add authors that Git reports as contributors.
|
||||
// This is the primary source of author information.
|
||||
for _, a := range gitAuthors(files) {
|
||||
merge[a] = true
|
||||
}
|
||||
// Add existing authors from the file. This should ensure that we
|
||||
// never lose authors, even if Git stops listing them. We can also
|
||||
// add authors manually this way.
|
||||
for _, a := range readAuthors() {
|
||||
merge[a] = true
|
||||
}
|
||||
// Write sorted list of authors back to the file.
|
||||
var result []string
|
||||
for a := range merge {
|
||||
result = append(result, a)
|
||||
}
|
||||
sort.Strings(result)
|
||||
content := new(bytes.Buffer)
|
||||
content.WriteString(authorsFileHeader)
|
||||
for _, a := range result {
|
||||
content.WriteString(a)
|
||||
content.WriteString("\n")
|
||||
}
|
||||
fmt.Println("writing AUTHORS")
|
||||
if err := ioutil.WriteFile("AUTHORS", content.Bytes(), 0644); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
func getInfo(files <-chan string, out chan<- *info, wg *sync.WaitGroup) {
|
||||
for file := range files {
|
||||
stat, err := os.Lstat(file)
|
||||
if err != nil {
|
||||
fmt.Printf("ERROR %s: %v\n", file, err)
|
||||
continue
|
||||
}
|
||||
if !stat.Mode().IsRegular() {
|
||||
continue
|
||||
}
|
||||
if isGenerated(file) {
|
||||
continue
|
||||
}
|
||||
info, err := fileInfo(file)
|
||||
if err != nil {
|
||||
fmt.Printf("ERROR %s: %v\n", file, err)
|
||||
continue
|
||||
}
|
||||
out <- info
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
func isGenerated(file string) bool {
|
||||
fd, err := os.Open(file)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer fd.Close()
|
||||
buf := make([]byte, 2048)
|
||||
n, _ := fd.Read(buf)
|
||||
buf = buf[:n]
|
||||
for _, l := range bytes.Split(buf, []byte("\n")) {
|
||||
if bytes.HasPrefix(l, []byte("// Code generated")) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// fileInfo finds the lowest year in which the given file was committed.
|
||||
func fileInfo(file string) (*info, error) {
|
||||
info := &info{file: file, Year: int64(time.Now().Year())}
|
||||
cmd := exec.Command("git", "log", "--follow", "--find-renames=80", "--find-copies=80", "--pretty=format:%ai", "--", file)
|
||||
err := doLines(cmd, func(line string) {
|
||||
y, err := strconv.ParseInt(line[:4], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("cannot parse year: %q", line[:4])
|
||||
}
|
||||
if y < info.Year {
|
||||
info.Year = y
|
||||
}
|
||||
})
|
||||
return info, err
|
||||
}
|
||||
|
||||
func writeLicenses(infos <-chan *info) {
|
||||
for i := range infos {
|
||||
writeLicense(i)
|
||||
}
|
||||
}
|
||||
|
||||
func writeLicense(info *info) {
|
||||
fi, err := os.Stat(info.file)
|
||||
if os.IsNotExist(err) {
|
||||
fmt.Println("skipping (does not exist)", info.file)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("error stat'ing %s: %v\n", info.file, err)
|
||||
}
|
||||
content, err := ioutil.ReadFile(info.file)
|
||||
if err != nil {
|
||||
log.Fatalf("error reading %s: %v\n", info.file, err)
|
||||
}
|
||||
// Construct new file content.
|
||||
buf := new(bytes.Buffer)
|
||||
licenseT.Execute(buf, info)
|
||||
if m := licenseCommentRE.FindIndex(content); m != nil && m[0] == 0 {
|
||||
buf.Write(content[:m[0]])
|
||||
buf.Write(content[m[1]:])
|
||||
} else {
|
||||
buf.Write(content)
|
||||
}
|
||||
// Write it to the file.
|
||||
if bytes.Equal(content, buf.Bytes()) {
|
||||
fmt.Println("skipping (no changes)", info.file)
|
||||
return
|
||||
}
|
||||
fmt.Println("writing", info.ShortLicense(), info.file)
|
||||
if err := ioutil.WriteFile(info.file, buf.Bytes(), fi.Mode()); err != nil {
|
||||
log.Fatalf("error writing %s: %v", info.file, err)
|
||||
}
|
||||
}
|
||||
|
||||
func doLines(cmd *exec.Cmd, f func(string)) error {
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
s := bufio.NewScanner(stdout)
|
||||
for s.Scan() {
|
||||
f(s.Text())
|
||||
}
|
||||
if s.Err() != nil {
|
||||
return s.Err()
|
||||
}
|
||||
if err := cmd.Wait(); err != nil {
|
||||
return fmt.Errorf("%v (for %s)", err, strings.Join(cmd.Args, " "))
|
||||
}
|
||||
return nil
|
||||
}
|
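The removed license updater above fans the slow per-file `git log` lookups out across all CPUs: a producer goroutine feeds file names into a channel, one worker per CPU turns them into info records, and a final goroutine closes the result channel once the WaitGroup drains. A minimal, self-contained sketch of that fan-out pattern (no git involved, values hypothetical):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	files := []string{"a.go", "b.go", "c.go"}

	filec := make(chan string)
	infoc := make(chan string, 20)
	var wg sync.WaitGroup

	// Producer: feed file names into the work channel, then close it.
	go func() {
		for _, f := range files {
			filec <- f
		}
		close(filec)
	}()
	// Workers: one per CPU, since the per-file lookup is the slow part.
	for i := 0; i < runtime.NumCPU(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for f := range filec {
				infoc <- "processed " + f // stand-in for the git history lookup
			}
		}()
	}
	// Close the results channel once every worker has finished.
	go func() {
		wg.Wait()
		close(infoc)
	}()
	for info := range infoc {
		fmt.Println(info)
	}
}
```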
|
@ -40,7 +40,7 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
|
|||
log := vm.StructLog{
|
||||
Pc: pc,
|
||||
Op: op,
|
||||
Gas: gas + cost,
|
||||
Gas: gas,
|
||||
GasCost: cost,
|
||||
MemorySize: memory.Len(),
|
||||
Storage: nil,
|
||||
|
|
|
@ -31,7 +31,9 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
|
@ -71,7 +73,7 @@ It expects the genesis file as argument.`,
|
|||
The import command imports blocks from an RLP-encoded form. The form can be one file
|
||||
with several RLP-encoded blocks, or several files can be used.
|
||||
|
||||
If only one file is used, import error will result in failure. If several files are used,
|
||||
If only one file is used, import error will result in failure. If several files are used,
|
||||
processing will proceed even if an individual RLP-file import failure occurs.`,
|
||||
}
|
||||
exportCommand = cli.Command{
|
||||
|
@ -90,6 +92,23 @@ Requires a first argument of the file to write to.
|
|||
Optional second and third arguments control the first and
|
||||
last block to write. In this mode, the file will be appended
|
||||
if already existing.`,
|
||||
}
|
||||
copydbCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(copyDb),
|
||||
Name: "copydb",
|
||||
Usage: "Create a local chain from a target chaindata folder",
|
||||
ArgsUsage: "<sourceChaindataDir>",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
The first argument must be the directory containing the blockchain to download from`,
|
||||
}
|
||||
removedbCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(removeDB),
|
||||
|
@ -268,6 +287,54 @@ func exportChain(ctx *cli.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func copyDb(ctx *cli.Context) error {
|
||||
// Ensure we have a source chain directory to copy
|
||||
if len(ctx.Args()) != 1 {
|
||||
utils.Fatalf("Source chaindata directory path argument missing")
|
||||
}
|
||||
// Initialize a new chain for the running node to sync into
|
||||
stack := makeFullNode(ctx)
|
||||
chain, chainDb := utils.MakeChain(ctx, stack)
|
||||
|
||||
syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
|
||||
dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
|
||||
|
||||
// Create a source peer to satisfy downloader requests from
|
||||
db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
peer := downloader.NewFakePeer("local", db, hc, dl)
|
||||
if err = dl.RegisterPeer("local", 63, peer); err != nil {
|
||||
return err
|
||||
}
|
||||
// Synchronise with the simulated peer
|
||||
start := time.Now()
|
||||
|
||||
currentHeader := hc.CurrentHeader()
|
||||
if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
|
||||
return err
|
||||
}
|
||||
for dl.Synchronising() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
fmt.Printf("Database copy done in %v\n", time.Since(start))
|
||||
|
||||
// Compact the entire database to remove any sync overhead
|
||||
start = time.Now()
|
||||
fmt.Println("Compacting entire database...")
|
||||
if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
|
||||
utils.Fatalf("Compaction failed: %v", err)
|
||||
}
|
||||
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeDB(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
|
||||
|
|
|
@ -146,6 +146,7 @@ func init() {
|
|||
initCommand,
|
||||
importCommand,
|
||||
exportCommand,
|
||||
copydbCommand,
|
||||
removedbCommand,
|
||||
dumpCommand,
|
||||
// See monitorcmd.go:
|
||||
|
|
|
@ -0,0 +1,414 @@
|
|||
// p2psim provides a command-line client for a simulation HTTP API.
|
||||
//
|
||||
// Here is an example of creating a 2 node network with the first node
|
||||
// connected to the second:
|
||||
//
|
||||
// $ p2psim node create
|
||||
// Created node01
|
||||
//
|
||||
// $ p2psim node start node01
|
||||
// Started node01
|
||||
//
|
||||
// $ p2psim node create
|
||||
// Created node02
|
||||
//
|
||||
// $ p2psim node start node02
|
||||
// Started node02
|
||||
//
|
||||
// $ p2psim node connect node01 node02
|
||||
// Connected node01 to node02
|
||||
//
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var client *simulations.Client
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Usage = "devp2p simulation command-line client"
|
||||
app.Flags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "api",
|
||||
Value: "http://localhost:8888",
|
||||
Usage: "simulation API URL",
|
||||
EnvVar: "P2PSIM_API_URL",
|
||||
},
|
||||
}
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
client = simulations.NewClient(ctx.GlobalString("api"))
|
||||
return nil
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "show network information",
|
||||
Action: showNetwork,
|
||||
},
|
||||
{
|
||||
Name: "events",
|
||||
Usage: "stream network events",
|
||||
Action: streamNetwork,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "current",
|
||||
Usage: "get existing nodes and conns first",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "filter",
|
||||
Value: "",
|
||||
Usage: "message filter",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "snapshot",
|
||||
Usage: "create a network snapshot to stdout",
|
||||
Action: createSnapshot,
|
||||
},
|
||||
{
|
||||
Name: "load",
|
||||
Usage: "load a network snapshot from stdin",
|
||||
Action: loadSnapshot,
|
||||
},
|
||||
{
|
||||
Name: "node",
|
||||
Usage: "manage simulation nodes",
|
||||
Action: listNodes,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "list nodes",
|
||||
Action: listNodes,
|
||||
},
|
||||
{
|
||||
Name: "create",
|
||||
Usage: "create a node",
|
||||
Action: createNode,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
Value: "",
|
||||
Usage: "node name",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "services",
|
||||
Value: "",
|
||||
Usage: "node services (comma separated)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Value: "",
|
||||
Usage: "node private key (hex encoded)",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "show",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "show node information",
|
||||
Action: showNode,
|
||||
},
|
||||
{
|
||||
Name: "start",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "start a node",
|
||||
Action: startNode,
|
||||
},
|
||||
{
|
||||
Name: "stop",
|
||||
ArgsUsage: "<node>",
|
||||
Usage: "stop a node",
|
||||
Action: stopNode,
|
||||
},
|
||||
{
|
||||
Name: "connect",
|
||||
ArgsUsage: "<node> <peer>",
|
||||
Usage: "connect a node to a peer node",
|
||||
Action: connectNode,
|
||||
},
|
||||
{
|
||||
Name: "disconnect",
|
||||
ArgsUsage: "<node> <peer>",
|
||||
Usage: "disconnect a node from a peer node",
|
||||
Action: disconnectNode,
|
||||
},
|
||||
{
|
||||
Name: "rpc",
|
||||
ArgsUsage: "<node> <method> [<args>]",
|
||||
Usage: "call a node RPC method",
|
||||
Action: rpcNode,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "subscribe",
|
||||
Usage: "method is a subscription",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
app.Run(os.Args)
|
||||
}
|
||||
|
||||
func showNetwork(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
network, err := client.GetNetwork()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NODES\t%d\n", len(network.Nodes))
|
||||
fmt.Fprintf(w, "CONNS\t%d\n", len(network.Conns))
|
||||
return nil
|
||||
}
|
||||
|
||||
func streamNetwork(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
events := make(chan *simulations.Event)
|
||||
sub, err := client.SubscribeNetwork(events, simulations.SubscribeOpts{
|
||||
Current: ctx.Bool("current"),
|
||||
Filter: ctx.String("filter"),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
enc := json.NewEncoder(ctx.App.Writer)
|
||||
for {
|
||||
select {
|
||||
case event := <-events:
|
||||
if err := enc.Encode(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createSnapshot(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
snap, err := client.CreateSnapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.NewEncoder(os.Stdout).Encode(snap)
|
||||
}
|
||||
|
||||
func loadSnapshot(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
snap := &simulations.Snapshot{}
|
||||
if err := json.NewDecoder(os.Stdin).Decode(snap); err != nil {
|
||||
return err
|
||||
}
|
||||
return client.LoadSnapshot(snap)
|
||||
}
|
||||
|
||||
func listNodes(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodes, err := client.GetNodes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NAME\tPROTOCOLS\tID\n")
|
||||
for _, node := range nodes {
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\n", node.Name, strings.Join(protocolList(node), ","), node.ID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func protocolList(node *p2p.NodeInfo) []string {
|
||||
protos := make([]string, 0, len(node.Protocols))
|
||||
for name := range node.Protocols {
|
||||
protos = append(protos, name)
|
||||
}
|
||||
return protos
|
||||
}
|
||||
|
||||
func createNode(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) != 0 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
config := &adapters.NodeConfig{
|
||||
Name: ctx.String("name"),
|
||||
}
|
||||
if key := ctx.String("key"); key != "" {
|
||||
privKey, err := crypto.HexToECDSA(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.ID = discover.PubkeyID(&privKey.PublicKey)
|
||||
config.PrivateKey = privKey
|
||||
}
|
||||
if services := ctx.String("services"); services != "" {
|
||||
config.Services = strings.Split(services, ",")
|
||||
}
|
||||
node, err := client.CreateNode(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Created", node.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func showNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
node, err := client.GetNode(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w := tabwriter.NewWriter(ctx.App.Writer, 1, 2, 2, ' ', 0)
|
||||
defer w.Flush()
|
||||
fmt.Fprintf(w, "NAME\t%s\n", node.Name)
|
||||
fmt.Fprintf(w, "PROTOCOLS\t%s\n", strings.Join(protocolList(node), ","))
|
||||
fmt.Fprintf(w, "ID\t%s\n", node.ID)
|
||||
fmt.Fprintf(w, "ENODE\t%s\n", node.Enode)
|
||||
for name, proto := range node.Protocols {
|
||||
fmt.Fprintln(w)
|
||||
fmt.Fprintf(w, "--- PROTOCOL INFO: %s\n", name)
|
||||
fmt.Fprintf(w, "%v\n", proto)
|
||||
fmt.Fprintf(w, "---\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func startNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
if err := client.StartNode(nodeName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Started", nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func stopNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 1 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
if err := client.StopNode(nodeName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Stopped", nodeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func connectNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
peerName := args[1]
|
||||
if err := client.ConnectNode(nodeName, peerName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Connected", nodeName, "to", peerName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func disconnectNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) != 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
peerName := args[1]
|
||||
if err := client.DisconnectNode(nodeName, peerName); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(ctx.App.Writer, "Disconnected", nodeName, "from", peerName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func rpcNode(ctx *cli.Context) error {
|
||||
args := ctx.Args()
|
||||
if len(args) < 2 {
|
||||
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
|
||||
}
|
||||
nodeName := args[0]
|
||||
method := args[1]
|
||||
rpcClient, err := client.RPCClient(context.Background(), nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.Bool("subscribe") {
|
||||
return rpcSubscribe(rpcClient, ctx.App.Writer, method, args[3:]...)
|
||||
}
|
||||
var result interface{}
|
||||
params := make([]interface{}, len(args[3:]))
|
||||
for i, v := range args[3:] {
|
||||
params[i] = v
|
||||
}
|
||||
if err := rpcClient.Call(&result, method, params...); err != nil {
|
||||
return err
|
||||
}
|
||||
return json.NewEncoder(ctx.App.Writer).Encode(result)
|
||||
}
|
||||
|
||||
func rpcSubscribe(client *rpc.Client, out io.Writer, method string, args ...string) error {
|
||||
parts := strings.SplitN(method, "_", 2)
|
||||
namespace := parts[0]
|
||||
method = parts[1]
|
||||
ch := make(chan interface{})
|
||||
subArgs := make([]interface{}, len(args)+1)
|
||||
subArgs[0] = method
|
||||
for i, v := range args {
|
||||
subArgs[i+1] = v
|
||||
}
|
||||
sub, err := client.Subscribe(context.Background(), namespace, ch, subArgs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
enc := json.NewEncoder(out)
|
||||
for {
|
||||
select {
|
||||
case v := <-ch:
|
||||
if err := enc.Encode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
|
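The p2psim CLI above is a thin wrapper around the simulations HTTP client created in `app.Before`. Assuming the same client methods shown in the listing (`CreateNode`, `StartNode`, `ConnectNode`) and a simulation API on the default URL, the two-node example from the header comment could also be scripted directly in Go, roughly like this (a sketch, not part of the commit):

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

func main() {
	// Same default endpoint as the --api flag above.
	client := simulations.NewClient("http://localhost:8888")

	var names []string
	for i := 0; i < 2; i++ {
		// Create and start a node, letting the server pick name and key.
		node, err := client.CreateNode(&adapters.NodeConfig{})
		if err != nil {
			log.Fatal(err)
		}
		if err := client.StartNode(node.Name); err != nil {
			log.Fatal(err)
		}
		names = append(names, node.Name)
	}
	// Connect the first node to the second, as in the header example.
	if err := client.ConnectNode(names[0], names[1]); err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected", names[0], "to", names[1])
}
```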
@ -161,6 +161,28 @@ func (w *wizard) readDefaultInt(def int) int {
|
|||
}
|
||||
}
|
||||
|
||||
// readDefaultBigInt reads a single line from stdin, trimming it from spaces,
|
||||
// enforcing it to parse into a big integer. If an empty line is entered, the
|
||||
// default value is returned.
|
||||
func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
return def
|
||||
}
|
||||
val, ok := new(big.Int).SetString(text, 0)
|
||||
if !ok {
|
||||
log.Error("Invalid input, expected big integer")
|
||||
continue
|
||||
}
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
// readFloat reads a single line from stdin, trimming it from spaces, enforcing it
|
||||
// to parse into a float.
|
||||
|
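readDefaultBigInt above keeps prompting until the input is either empty (return the default) or parses as a big integer; `SetString(text, 0)` infers the base from a `0x` or `0` prefix. A standalone sketch of the same loop, detached from the wizard type (hypothetical helper reading from any buffered reader instead of `w.in`):

```go
package main

import (
	"bufio"
	"fmt"
	"math/big"
	"os"
	"strings"
)

// readDefaultBigInt mirrors the wizard helper: an empty line returns def,
// anything else must parse as a big integer (base inferred from its prefix).
func readDefaultBigInt(in *bufio.Reader, def *big.Int) *big.Int {
	for {
		fmt.Printf("> ")
		text, err := in.ReadString('\n')
		if err != nil {
			return def // unlike the wizard (which aborts via log.Crit), just fall back
		}
		if text = strings.TrimSpace(text); text == "" {
			return def
		}
		if val, ok := new(big.Int).SetString(text, 0); ok {
			return val
		}
		fmt.Println("Invalid input, expected big integer")
	}
}

func main() {
	fmt.Println(readDefaultBigInt(bufio.NewReader(os.Stdin), big.NewInt(4)))
}
```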
@ -280,8 +302,10 @@ func (w *wizard) readJSON() string {
|
|||
}
|
||||
|
||||
// readIPAddress reads a single line from stdin, trimming if from spaces and
|
||||
// converts it to a network IP address.
|
||||
func (w *wizard) readIPAddress() net.IP {
|
||||
// returning it if it's convertible to an IP address. The reason for keeping
|
||||
// the user input format instead of returning a Go net.IP is to match with
|
||||
// weird formats used by ethstats, which compares IPs textually, not by value.
|
||||
func (w *wizard) readIPAddress() string {
|
||||
for {
|
||||
// Read the IP address from the user
|
||||
fmt.Printf("> ")
|
||||
|
@ -290,14 +314,13 @@ func (w *wizard) readIPAddress() net.IP {
|
|||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
return nil
|
||||
return ""
|
||||
}
|
||||
// Make sure it looks ok and return it if so
|
||||
ip := net.ParseIP(text)
|
||||
if ip == nil {
|
||||
if ip := net.ParseIP(text); ip == nil {
|
||||
log.Error("Invalid IP address, please retry")
|
||||
continue
|
||||
}
|
||||
return ip
|
||||
return text
|
||||
}
|
||||
}
|
||||
|
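The reworked readIPAddress validates input with net.ParseIP but returns the user's exact text rather than a parsed `net.IP`, because ethstats compares banned addresses textually, so the typed notation has to be preserved. A small sketch of that validate-but-return-text approach (function name and inputs hypothetical):

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// validIPText validates the input with net.ParseIP but hands back the
// original text, so whatever notation the user typed is preserved.
func validIPText(text string) (string, bool) {
	text = strings.TrimSpace(text)
	if net.ParseIP(text) == nil {
		return "", false
	}
	return text, true
}

func main() {
	for _, in := range []string{"10.0.0.1", "::ffff:10.0.0.1", "not-an-ip"} {
		if ip, ok := validIPText(in); ok {
			fmt.Println("keep:", ip)
		} else {
			fmt.Println("reject:", in)
		}
	}
}
```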
|
|
@ -18,6 +18,7 @@ package main
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
@ -64,17 +65,37 @@ func (w *wizard) deployEthstats() {
|
|||
fmt.Println()
|
||||
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
|
||||
if w.readDefaultString("y") != "y" {
|
||||
infos.banned = nil
|
||||
|
||||
// The user might want to clear the entire list, although generally probably not
|
||||
fmt.Println()
|
||||
fmt.Println("Which IP addresses should be blacklisted?")
|
||||
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
|
||||
if w.readDefaultString("n") != "n" {
|
||||
infos.banned = nil
|
||||
}
|
||||
// Offer the user to explicitly add/remove certain IP addresses
|
||||
fmt.Println()
|
||||
fmt.Println("Which additional IP addresses should be blacklisted?")
|
||||
for {
|
||||
if ip := w.readIPAddress(); ip != nil {
|
||||
infos.banned = append(infos.banned, ip.String())
|
||||
if ip := w.readIPAddress(); ip != "" {
|
||||
infos.banned = append(infos.banned, ip)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("Which IP addresses should not be blacklisted?")
|
||||
for {
|
||||
if ip := w.readIPAddress(); ip != "" {
|
||||
for i, addr := range infos.banned {
|
||||
if ip == addr {
|
||||
infos.banned = append(infos.banned[:i], infos.banned[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
sort.Strings(infos.banned)
|
||||
}
|
||||
// Try to deploy the ethstats server on the host
|
||||
trusted := make([]string, 0, len(w.servers))
|
||||
|
|
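The blacklist editing above leans on two common slice idioms: append to add an entry, and `append(s[:i], s[i+1:]...)` to delete one by value, followed by a sort to keep the list stable. A compact illustration outside the wizard (addresses are arbitrary):

```go
package main

import (
	"fmt"
	"sort"
)

// remove deletes the first occurrence of val from list, as done for the
// "should not be blacklisted" answers above.
func remove(list []string, val string) []string {
	for i, v := range list {
		if v == val {
			return append(list[:i], list[i+1:]...)
		}
	}
	return list
}

func main() {
	banned := []string{"10.0.0.5", "192.168.1.9"}
	banned = append(banned, "172.16.0.2") // newly blacklisted address
	banned = remove(banned, "10.0.0.5")   // pardoned address
	sort.Strings(banned)
	fmt.Println(banned) // [172.16.0.2 192.168.1.9]
}
```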
|
@ -18,7 +18,9 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
@ -42,6 +44,7 @@ func (w *wizard) makeGenesis() {
|
|||
EIP150Block: big.NewInt(2),
|
||||
EIP155Block: big.NewInt(3),
|
||||
EIP158Block: big.NewInt(3),
|
||||
ByzantiumBlock: big.NewInt(4),
|
||||
},
|
||||
}
|
||||
// Figure out which consensus engine to choose
|
||||
|
@ -134,3 +137,53 @@ func (w *wizard) makeGenesis() {
|
|||
// All done, store the genesis and flush to disk
|
||||
w.conf.genesis = genesis
|
||||
}
|
||||
|
||||
// manageGenesis permits the modification of chain configuration parameters in
|
||||
// a genesis config and the export of the entire genesis spec.
|
||||
func (w *wizard) manageGenesis() {
|
||||
// Figure out whether to modify or export the genesis
|
||||
fmt.Println()
|
||||
fmt.Println(" 1. Modify existing fork rules")
|
||||
fmt.Println(" 2. Export genesis configuration")
|
||||
|
||||
choice := w.read()
|
||||
switch {
|
||||
case choice == "1":
|
||||
// Fork rule updating requested, iterate over each fork
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.genesis.Config.HomesteadBlock)
|
||||
w.conf.genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.genesis.Config.HomesteadBlock)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP150Block)
|
||||
w.conf.genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP150Block)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP155Block)
|
||||
w.conf.genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP155Block)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP158Block)
|
||||
w.conf.genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP158Block)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.genesis.Config.ByzantiumBlock)
|
||||
w.conf.genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.genesis.Config.ByzantiumBlock)
|
||||
|
||||
out, _ := json.MarshalIndent(w.conf.genesis.Config, "", " ")
|
||||
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
|
||||
|
||||
case choice == "2":
|
||||
// Save whatever genesis configuration we currently have
|
||||
fmt.Println()
|
||||
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
|
||||
out, _ := json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
|
||||
log.Error("Failed to save genesis file", "err", err)
|
||||
}
|
||||
log.Info("Exported existing genesis block")
|
||||
|
||||
default:
|
||||
log.Error("That's not something I can do")
|
||||
}
|
||||
}
|
||||
|
|
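manageGenesis only rewrites the fork-activation block numbers inside the chain configuration and then pretty-prints or exports the result. A rough standalone equivalent against the 1.7-era `params.ChainConfig` fields referenced above (the chain id and block numbers are made-up values for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Start from fork rules like the ones makeGenesis picks for new networks.
	config := &params.ChainConfig{
		ChainId:        big.NewInt(1337), // hypothetical private-network id
		HomesteadBlock: big.NewInt(1),
		EIP150Block:    big.NewInt(2),
		EIP155Block:    big.NewInt(3),
		EIP158Block:    big.NewInt(3),
		ByzantiumBlock: big.NewInt(4),
	}
	// "Which block should Byzantium come into effect?" with a non-default answer.
	config.ByzantiumBlock = big.NewInt(10)

	out, _ := json.MarshalIndent(config, "", "  ")
	fmt.Printf("Chain configuration updated:\n\n%s\n", out)
}
```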
|
@ -98,7 +98,7 @@ func (w *wizard) run() {
|
|||
if w.conf.genesis == nil {
|
||||
fmt.Println(" 2. Configure new genesis")
|
||||
} else {
|
||||
fmt.Println(" 2. Save existing genesis")
|
||||
fmt.Println(" 2. Manage existing genesis")
|
||||
}
|
||||
if len(w.servers) == 0 {
|
||||
fmt.Println(" 3. Track new remote server")
|
||||
|
@ -118,18 +118,10 @@ func (w *wizard) run() {
|
|||
w.networkStats(false)
|
||||
|
||||
case choice == "2":
|
||||
// If we don't have a genesis, make one
|
||||
if w.conf.genesis == nil {
|
||||
w.makeGenesis()
|
||||
} else {
|
||||
// Otherwise just save whatever we currently have
|
||||
fmt.Println()
|
||||
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
|
||||
out, _ := json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
|
||||
log.Error("Failed to save genesis file", "err", err)
|
||||
}
|
||||
log.Info("Exported existing genesis block")
|
||||
w.manageGenesis()
|
||||
}
|
||||
case choice == "3":
|
||||
if len(w.servers) == 0 {
|
||||
|
|
|
@ -129,7 +129,7 @@ func (w *wizard) networkStats(tips bool) {
|
|||
}
|
||||
}
|
||||
// If a genesis block was found, load it into our configs
|
||||
if protips.genesis != "" {
|
||||
if protips.genesis != "" && w.conf.genesis == nil {
|
||||
genesis := new(core.Genesis)
|
||||
if err := json.Unmarshal([]byte(protips.genesis), genesis); err != nil {
|
||||
log.Error("Failed to parse remote genesis", "err", err)
|
||||
|
|
|
@ -31,6 +31,8 @@ import (
|
|||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/clique"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
|
@ -305,7 +307,7 @@ var (
|
|||
}
|
||||
PasswordFileFlag = cli.StringFlag{
|
||||
Name: "password",
|
||||
Usage: "Password file to use for non-inteactive password input",
|
||||
Usage: "Password file to use for non-interactive password input",
|
||||
Value: "",
|
||||
}
|
||||
|
||||
|
@ -1086,17 +1088,22 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
|
|||
var err error
|
||||
chainDb = MakeChainDatabase(ctx, stack)
|
||||
|
||||
engine := ethash.NewFaker()
|
||||
if !ctx.GlobalBool(FakePoWFlag.Name) {
|
||||
engine = ethash.New(
|
||||
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
|
||||
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
)
|
||||
}
|
||||
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
|
||||
if err != nil {
|
||||
Fatalf("%v", err)
|
||||
}
|
||||
var engine consensus.Engine
|
||||
if config.Clique != nil {
|
||||
engine = clique.New(config.Clique, chainDb)
|
||||
} else {
|
||||
engine = ethash.NewFaker()
|
||||
if !ctx.GlobalBool(FakePoWFlag.Name) {
|
||||
engine = ethash.New(
|
||||
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
|
||||
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
)
|
||||
}
|
||||
}
|
||||
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
|
||||
chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
|
||||
if err != nil {
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/misc"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
|
@ -313,6 +314,10 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header,
|
|||
return errInvalidDifficulty
|
||||
}
|
||||
}
|
||||
// If all checks passed, validate any special fields for hard forks
|
||||
if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
|
||||
return err
|
||||
}
|
||||
// All basic checks passed, verify cascading fields
|
||||
return c.verifyCascadingFields(chain, header, parents)
|
||||
}
|
||||
|
|
|
@ -102,7 +102,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
|
|||
header.Cap *= 4
|
||||
cache := *(*[]byte)(unsafe.Pointer(&header))
|
||||
|
||||
// Calculate the number of thoretical rows (we'll store in one buffer nonetheless)
|
||||
// Calculate the number of theoretical rows (we'll store in one buffer nonetheless)
|
||||
size := uint64(len(cache))
|
||||
rows := int(size) / hashBytes
|
||||
|
||||
|
@ -187,7 +187,7 @@ func fnvHash(mix []uint32, data []uint32) {
|
|||
// generateDatasetItem combines data from 256 pseudorandomly selected cache nodes,
|
||||
// and hashes that to compute a single dataset node.
|
||||
func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte {
|
||||
// Calculate the number of thoretical rows (we use one buffer nonetheless)
|
||||
// Calculate the number of theoretical rows (we use one buffer nonetheless)
|
||||
rows := uint32(len(cache) / hashWords)
|
||||
|
||||
// Initialize the mix
|
||||
|
@ -287,7 +287,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
|
|||
// hashimoto aggregates data from the full dataset in order to produce our final
|
||||
// value for a particular header hash and nonce.
|
||||
func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32) []uint32) ([]byte, []byte) {
|
||||
// Calculate the number of thoretical rows (we use one buffer nonetheless)
|
||||
// Calculate the number of theoretical rows (we use one buffer nonetheless)
|
||||
rows := uint32(size / mixBytes)
|
||||
|
||||
// Combine header+nonce into a 64 byte seed
|
||||
|
|
vendor/github.com/ethereum/go-ethereum/containers/docker/master-alpine/Dockerfile (2 changes, generated, vendored)
|
@ -2,7 +2,7 @@ FROM alpine:3.5
|
|||
|
||||
RUN \
|
||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||
git clone --depth 1 --branch release/1.6 https://github.com/ethereum/go-ethereum && \
|
||||
git clone --depth 1 --branch release/1.7 https://github.com/ethereum/go-ethereum && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
apk del go git make gcc musl-dev linux-headers && \
|
||||
|
|
vendor/github.com/ethereum/go-ethereum/containers/docker/master-ubuntu/Dockerfile (2 changes, generated, vendored)
|
@ -3,7 +3,7 @@ FROM ubuntu:xenial
|
|||
RUN \
|
||||
apt-get update && apt-get upgrade -q -y && \
|
||||
apt-get install -y --no-install-recommends golang git make gcc libc-dev ca-certificates && \
|
||||
git clone --depth 1 --branch release/1.5 https://github.com/ethereum/go-ethereum && \
|
||||
git clone --depth 1 --branch release/1.7 https://github.com/ethereum/go-ethereum && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
apt-get remove -y golang git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
||||
|
|
vendor/github.com/ethereum/go-ethereum/contracts/chequebook/contract/chequebook.sol (5 changes, generated, vendored)
|
@ -27,10 +27,11 @@ contract chequebook is mortal {
|
|||
if(owner != ecrecover(hash, sig_v, sig_r, sig_s)) return;
|
||||
// Attempt sending the difference between the cumulative amount on the cheque
|
||||
// and the cumulative amount on the last cashed cheque to beneficiary.
|
||||
if (amount - sent[beneficiary] >= this.balance) {
|
||||
uint256 diff = amount - sent[beneficiary];
|
||||
if (diff <= this.balance) {
|
||||
// update the cumulative amount before sending
|
||||
sent[beneficiary] = amount;
|
||||
if (!beneficiary.send(amount - sent[beneficiary])) {
|
||||
if (!beneficiary.send(diff)) {
|
||||
// Upon failure to execute send, revert everything
|
||||
throw;
|
||||
}
|
||||
|
|
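The Solidity fix above addresses two bugs in cheque cashing: the balance comparison was inverted (the payout has to fit within the contract balance, not exceed it), and the payout was computed after `sent[beneficiary]` had already been bumped, so the contract always tried to send zero. The corrected arithmetic, mirrored in Go as a quick worked example (numbers are arbitrary):

```go
package main

import "fmt"

// cash mirrors the fixed contract logic: compute the payable difference
// before updating the cumulative counter, and only pay if it fits the balance.
func cash(balance, cumulativeAmount, alreadySent uint64) (payout uint64, ok bool) {
	diff := cumulativeAmount - alreadySent
	if diff > balance {
		return 0, false // cheque exceeds what the contract holds
	}
	return diff, true
}

func main() {
	// Cheque worth 100 in total, 40 already cashed, 70 held by the contract.
	fmt.Println(cash(70, 100, 40)) // 60 true
	// Cheque worth 200 in total against the same balance: rejected.
	fmt.Println(cash(70, 200, 40)) // 0 false
}
```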
|
@ -796,11 +796,6 @@ func (bc *BlockChain) WriteBlockAndState(block *types.Block, receipts []*types.R
|
|||
bc.mu.Lock()
|
||||
defer bc.mu.Unlock()
|
||||
|
||||
if bc.HasBlock(block.Hash(), block.NumberU64()) {
|
||||
log.Trace("Block existed", "hash", block.Hash())
|
||||
return
|
||||
}
|
||||
|
||||
localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
|
||||
externTd := new(big.Int).Add(block.Difficulty(), ptd)
|
||||
|
||||
|
|
|
@ -80,7 +80,8 @@ type Matcher struct {
|
|||
}
|
||||
|
||||
// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
|
||||
// address and topic filtering on them.
|
||||
// address and topic filtering on them. Setting a filter component to `nil` is
|
||||
// allowed and will result in that filter rule being skipped (OR 0x11...1).
|
||||
func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
|
||||
// Create the matcher instance
|
||||
m := &Matcher{
|
||||
|
@ -95,11 +96,22 @@ func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
|
|||
m.filters = nil
|
||||
|
||||
for _, filter := range filters {
|
||||
// Gather the bit indexes of the filter rule, special casing the nil filter
|
||||
if len(filter) == 0 {
|
||||
continue
|
||||
}
|
||||
bloomBits := make([]bloomIndexes, len(filter))
|
||||
for i, clause := range filter {
|
||||
if clause == nil {
|
||||
bloomBits = nil
|
||||
break
|
||||
}
|
||||
bloomBits[i] = calcBloomIndexes(clause)
|
||||
}
|
||||
m.filters = append(m.filters, bloomBits)
|
||||
// Accumulate the filter rules if no nil rule was within
|
||||
if bloomBits != nil {
|
||||
m.filters = append(m.filters, bloomBits)
|
||||
}
|
||||
}
|
||||
// For every bit, create a scheduler to load/download the bit vectors
|
||||
for _, bloomIndexLists := range m.filters {
|
||||
|
|
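With the change above, any sub-filter that is empty or contains a nil clause is treated as "match everything" and is simply dropped from the pipeline instead of being scheduled for bloom-bit retrieval. The accumulation rule in isolation, with plain byte slices standing in for addresses and topics (a sketch, not the matcher itself):

```go
package main

import "fmt"

// compactFilters mirrors the accumulation rule above: empty sub-filters and
// sub-filters containing a nil clause match everything, so they are dropped
// rather than scheduled for bloom-bit retrieval.
func compactFilters(filters [][][]byte) [][][]byte {
	var kept [][][]byte
	for _, rule := range filters {
		if len(rule) == 0 {
			continue
		}
		wildcard := false
		for _, clause := range rule {
			if clause == nil {
				wildcard = true
				break
			}
		}
		if !wildcard {
			kept = append(kept, rule)
		}
	}
	return kept
}

func main() {
	filters := [][][]byte{
		{[]byte("contract address")}, // concrete rule: kept
		{[]byte("topic0"), nil},      // wildcard clause: dropped
		{},                           // empty rule: dropped
	}
	fmt.Println(len(compactFilters(filters))) // 1
}
```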
|
@ -132,6 +132,7 @@ func (ch addLogChange) undo(s *StateDB) {
|
|||
} else {
|
||||
s.logs[ch.txhash] = logs[:len(logs)-1]
|
||||
}
|
||||
s.logSize--
|
||||
}
|
||||
|
||||
func (ch addPreimageChange) undo(s *StateDB) {
|
||||
|
|
|
@ -14,7 +14,7 @@ import (
|
|||
func (r Receipt) MarshalJSON() ([]byte, error) {
|
||||
type Receipt struct {
|
||||
PostState hexutil.Bytes `json:"root"`
|
||||
Failed bool `json:"failed"`
|
||||
Status hexutil.Uint `json:"status"`
|
||||
CumulativeGasUsed *hexutil.Big `json:"cumulativeGasUsed" gencodec:"required"`
|
||||
Bloom Bloom `json:"logsBloom" gencodec:"required"`
|
||||
Logs []*Log `json:"logs" gencodec:"required"`
|
||||
|
@ -24,7 +24,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
var enc Receipt
|
||||
enc.PostState = r.PostState
|
||||
enc.Failed = r.Failed
|
||||
enc.Status = hexutil.Uint(r.Status)
|
||||
enc.CumulativeGasUsed = (*hexutil.Big)(r.CumulativeGasUsed)
|
||||
enc.Bloom = r.Bloom
|
||||
enc.Logs = r.Logs
|
||||
|
@ -37,7 +37,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
|
|||
func (r *Receipt) UnmarshalJSON(input []byte) error {
|
||||
type Receipt struct {
|
||||
PostState hexutil.Bytes `json:"root"`
|
||||
Failed *bool `json:"failed"`
|
||||
Status *hexutil.Uint `json:"status"`
|
||||
CumulativeGasUsed *hexutil.Big `json:"cumulativeGasUsed" gencodec:"required"`
|
||||
Bloom *Bloom `json:"logsBloom" gencodec:"required"`
|
||||
Logs []*Log `json:"logs" gencodec:"required"`
|
||||
|
@ -52,8 +52,8 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
|
|||
if dec.PostState != nil {
|
||||
r.PostState = dec.PostState
|
||||
}
|
||||
if dec.Failed != nil {
|
||||
r.Failed = *dec.Failed
|
||||
if dec.Status != nil {
|
||||
r.Status = uint(*dec.Status)
|
||||
}
|
||||
if dec.CumulativeGasUsed == nil {
|
||||
return errors.New("missing required field 'cumulativeGasUsed' for Receipt")
|
||||
|
|
|
@ -30,15 +30,23 @@ import (
|
|||
//go:generate gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go
|
||||
|
||||
var (
|
||||
receiptStatusFailed = []byte{}
|
||||
receiptStatusSuccessful = []byte{0x01}
|
||||
receiptStatusFailedRLP = []byte{}
|
||||
receiptStatusSuccessfulRLP = []byte{0x01}
|
||||
)
|
||||
|
||||
const (
|
||||
// ReceiptStatusFailed is the status code of a transaction if execution failed.
|
||||
ReceiptStatusFailed = uint(0)
|
||||
|
||||
// ReceiptStatusSuccessful is the status code of a transaction if execution succeeded.
|
||||
ReceiptStatusSuccessful = uint(1)
|
||||
)
|
||||
|
||||
// Receipt represents the results of a transaction.
|
||||
type Receipt struct {
|
||||
// Consensus fields
|
||||
PostState []byte `json:"root"`
|
||||
Failed bool `json:"failed"`
|
||||
Status uint `json:"status"`
|
||||
CumulativeGasUsed *big.Int `json:"cumulativeGasUsed" gencodec:"required"`
|
||||
Bloom Bloom `json:"logsBloom" gencodec:"required"`
|
||||
Logs []*Log `json:"logs" gencodec:"required"`
|
||||
|
@ -51,6 +59,7 @@ type Receipt struct {
|
|||
|
||||
type receiptMarshaling struct {
|
||||
PostState hexutil.Bytes
|
||||
Status hexutil.Uint
|
||||
CumulativeGasUsed *hexutil.Big
|
||||
GasUsed *hexutil.Big
|
||||
}
|
||||
|
@ -75,7 +84,13 @@ type receiptStorageRLP struct {
|
|||
|
||||
// NewReceipt creates a barebone transaction receipt, copying the init fields.
|
||||
func NewReceipt(root []byte, failed bool, cumulativeGasUsed *big.Int) *Receipt {
|
||||
return &Receipt{PostState: common.CopyBytes(root), Failed: failed, CumulativeGasUsed: new(big.Int).Set(cumulativeGasUsed)}
|
||||
r := &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumulativeGasUsed)}
|
||||
if failed {
|
||||
r.Status = ReceiptStatusFailed
|
||||
} else {
|
||||
r.Status = ReceiptStatusSuccessful
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt
|
||||
|
@ -100,10 +115,10 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
|
|||
|
||||
func (r *Receipt) setStatus(postStateOrStatus []byte) error {
|
||||
switch {
|
||||
case bytes.Equal(postStateOrStatus, receiptStatusSuccessful):
|
||||
r.Failed = false
|
||||
case bytes.Equal(postStateOrStatus, receiptStatusFailed):
|
||||
r.Failed = true
|
||||
case bytes.Equal(postStateOrStatus, receiptStatusSuccessfulRLP):
|
||||
r.Status = ReceiptStatusSuccessful
|
||||
case bytes.Equal(postStateOrStatus, receiptStatusFailedRLP):
|
||||
r.Status = ReceiptStatusFailed
|
||||
case len(postStateOrStatus) == len(common.Hash{}):
|
||||
r.PostState = postStateOrStatus
|
||||
default:
|
||||
|
@ -114,19 +129,18 @@ func (r *Receipt) setStatus(postStateOrStatus []byte) error {
|
|||
|
||||
func (r *Receipt) statusEncoding() []byte {
|
||||
if len(r.PostState) == 0 {
|
||||
if r.Failed {
|
||||
return receiptStatusFailed
|
||||
} else {
|
||||
return receiptStatusSuccessful
|
||||
if r.Status == ReceiptStatusFailed {
|
||||
return receiptStatusFailedRLP
|
||||
}
|
||||
return receiptStatusSuccessfulRLP
|
||||
}
|
||||
return r.PostState
|
||||
}
|
||||
|
||||
// String implements the Stringer interface.
|
||||
func (r *Receipt) String() string {
|
||||
if r.PostState == nil {
|
||||
return fmt.Sprintf("receipt{failed=%t cgas=%v bloom=%x logs=%v}", r.Failed, r.CumulativeGasUsed, r.Bloom, r.Logs)
|
||||
if len(r.PostState) == 0 {
|
||||
return fmt.Sprintf("receipt{status=%d cgas=%v bloom=%x logs=%v}", r.Status, r.CumulativeGasUsed, r.Bloom, r.Logs)
|
||||
}
|
||||
return fmt.Sprintf("receipt{med=%x cgas=%v bloom=%x logs=%v}", r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs)
|
||||
}
|
||||
|
|
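The receipt now carries a numeric Status instead of a Failed flag, while the consensus encoding stays byte-compatible: an empty byte string still means failure, 0x01 means success, and a 32-byte value is a pre-Byzantium post-state root. A toy round-trip of that encoding rule (standalone sketch, not the actual types package):

```go
package main

import (
	"bytes"
	"fmt"
)

const (
	receiptStatusFailed     = uint(0)
	receiptStatusSuccessful = uint(1)
)

// statusRLP mirrors statusEncoding: empty bytes for failure, 0x01 for success.
func statusRLP(status uint) []byte {
	if status == receiptStatusFailed {
		return []byte{}
	}
	return []byte{0x01}
}

// statusFromRLP mirrors setStatus: anything else would be a 32-byte
// post-state root from a pre-Byzantium receipt.
func statusFromRLP(enc []byte) (uint, bool) {
	switch {
	case bytes.Equal(enc, []byte{}):
		return receiptStatusFailed, true
	case bytes.Equal(enc, []byte{0x01}):
		return receiptStatusSuccessful, true
	default:
		return 0, false
	}
}

func main() {
	for _, s := range []uint{receiptStatusFailed, receiptStatusSuccessful} {
		dec, ok := statusFromRLP(statusRLP(s))
		fmt.Println(s, "->", dec, ok)
	}
}
```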
|
@ -209,12 +209,6 @@ func (tx *Transaction) Hash() common.Hash {
|
|||
return v
|
||||
}
|
||||
|
||||
// SigHash returns the hash to be signed by the sender.
|
||||
// It does not uniquely identify the transaction.
|
||||
func (tx *Transaction) SigHash(signer Signer) common.Hash {
|
||||
return signer.Hash(tx)
|
||||
}
|
||||
|
||||
func (tx *Transaction) Size() common.StorageSize {
|
||||
if size := tx.size.Load(); size != nil {
|
||||
return size.(common.StorageSize)
|
||||
|
@ -249,7 +243,13 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) {
|
|||
// WithSignature returns a new transaction with the given signature.
|
||||
// This signature needs to be formatted as described in the yellow paper (v+27).
|
||||
func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) {
|
||||
return signer.WithSignature(tx, sig)
|
||||
r, s, v, err := signer.SignatureValues(tx, sig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpy := &Transaction{data: tx.data}
|
||||
cpy.data.R, cpy.data.S, cpy.data.V = r, s, v
|
||||
return cpy, nil
|
||||
}
|
||||
|
||||
// Cost returns amount + gasprice * gaslimit.
|
||||
|
|
|
@ -29,9 +29,6 @@ import (
|
|||
|
||||
var (
|
||||
ErrInvalidChainId = errors.New("invalid chain id for signer")
|
||||
|
||||
errAbstractSigner = errors.New("abstract signer")
|
||||
abstractSignerAddress = common.HexToAddress("ffffffffffffffffffffffffffffffffffffffff")
|
||||
)
|
||||
|
||||
// sigCache is used to cache the derived sender and contains
|
||||
|
@ -62,12 +59,9 @@ func SignTx(tx *Transaction, s Signer, prv *ecdsa.PrivateKey) (*Transaction, err
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.WithSignature(tx, sig)
|
||||
return tx.WithSignature(s, sig)
|
||||
}
|
||||
|
||||
// Sender derives the sender from the tx using the signer derivation
|
||||
// functions.
|
||||
|
||||
// Sender returns the address derived from the signature (V, R, S) using secp256k1
|
||||
// elliptic curve and an error if it failed deriving or upon an incorrect
|
||||
// signature.
|
||||
|
@ -86,33 +80,30 @@ func Sender(signer Signer, tx *Transaction) (common.Address, error) {
|
|||
}
|
||||
}
|
||||
|
||||
pubkey, err := signer.PublicKey(tx)
|
||||
addr, err := signer.Sender(tx)
|
||||
if err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
var addr common.Address
|
||||
copy(addr[:], crypto.Keccak256(pubkey[1:])[12:])
|
||||
tx.from.Store(sigCache{signer: signer, from: addr})
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// Signer encapsulates transaction signature handling. Note that this interface is not a
|
||||
// stable API and may change at any time to accommodate new protocol rules.
|
||||
type Signer interface {
|
||||
// Hash returns the rlp encoded hash for signatures
|
||||
// Sender returns the sender address of the transaction.
|
||||
Sender(tx *Transaction) (common.Address, error)
|
||||
// SignatureValues returns the raw R, S, V values corresponding to the
|
||||
// given signature.
|
||||
SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error)
|
||||
// Hash returns the hash to be signed.
|
||||
Hash(tx *Transaction) common.Hash
|
||||
// PubilcKey returns the public key derived from the signature
|
||||
PublicKey(tx *Transaction) ([]byte, error)
|
||||
// WithSignature returns a copy of the transaction with the given signature.
|
||||
// The signature must be encoded in [R || S || V] format where V is 0 or 1.
|
||||
WithSignature(tx *Transaction, sig []byte) (*Transaction, error)
|
||||
// Checks for equality on the signers
|
||||
// Equal returns true if the given signer is the same as the receiver.
|
||||
Equal(Signer) bool
|
||||
}
|
||||
|
||||
// EIP155Transaction implements TransactionInterface using the
|
||||
// EIP155 rules
|
||||
// EIP155Transaction implements Signer using the EIP155 rules.
|
||||
type EIP155Signer struct {
|
||||
HomesteadSigner
|
||||
|
||||
chainId, chainIdMul *big.Int
|
||||
}
|
||||
|
||||
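With the interface reshaped around Sender and SignatureValues, signing goes through types.SignTx (above) and recovery through the per-scheme Sender methods (below). A hedged end-to-end sketch against the go-ethereum 1.7 API; note that in this release NewTransaction still takes *big.Int gas values, and the amounts here are arbitrary:

```go
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Throwaway key; its address is what Sender should recover.
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	from := crypto.PubkeyToAddress(key.PublicKey)

	signer := types.NewEIP155Signer(big.NewInt(1)) // chain id 1
	tx := types.NewTransaction(0, common.Address{}, big.NewInt(1), big.NewInt(21000), big.NewInt(1), nil)

	// SignTx hashes via signer.Hash, signs, then tx.WithSignature fills in
	// V, R, S through signer.SignatureValues.
	signed, err := types.SignTx(tx, signer, key)
	if err != nil {
		log.Fatal(err)
	}
	// Sender recovers the address via EIP155Signer.Sender / recoverPlain.
	recovered, err := types.Sender(signer, signed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("match:", recovered == from)
}
```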
|
@ -131,55 +122,32 @@ func (s EIP155Signer) Equal(s2 Signer) bool {
|
|||
return ok && eip155.chainId.Cmp(s.chainId) == 0
|
||||
}
|
||||
|
||||
func (s EIP155Signer) PublicKey(tx *Transaction) ([]byte, error) {
|
||||
// if the transaction is not protected fall back to homestead signer
|
||||
var big8 = big.NewInt(8)
|
||||
|
||||
func (s EIP155Signer) Sender(tx *Transaction) (common.Address, error) {
|
||||
if !tx.Protected() {
|
||||
return (HomesteadSigner{}).PublicKey(tx)
|
||||
return HomesteadSigner{}.Sender(tx)
|
||||
}
|
||||
|
||||
if tx.ChainId().Cmp(s.chainId) != 0 {
|
||||
return nil, ErrInvalidChainId
|
||||
return common.Address{}, ErrInvalidChainId
|
||||
}
|
||||
|
||||
V := byte(new(big.Int).Sub(tx.data.V, s.chainIdMul).Uint64() - 35)
|
||||
if !crypto.ValidateSignatureValues(V, tx.data.R, tx.data.S, true) {
|
||||
return nil, ErrInvalidSig
|
||||
}
|
||||
// encode the signature in uncompressed format
|
||||
R, S := tx.data.R.Bytes(), tx.data.S.Bytes()
|
||||
sig := make([]byte, 65)
|
||||
copy(sig[32-len(R):32], R)
|
||||
copy(sig[64-len(S):64], S)
|
||||
sig[64] = V
|
||||
|
||||
// recover the public key from the signature
|
||||
hash := s.Hash(tx)
|
||||
pub, err := crypto.Ecrecover(hash[:], sig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pub) == 0 || pub[0] != 4 {
|
||||
return nil, errors.New("invalid public key")
|
||||
}
|
||||
return pub, nil
|
||||
V := new(big.Int).Sub(tx.data.V, s.chainIdMul)
|
||||
V.Sub(V, big8)
|
||||
return recoverPlain(s.Hash(tx), tx.data.R, tx.data.S, V, true)
|
||||
}
|
||||
|
||||
// WithSignature returns a new transaction with the given signature. This signature
|
||||
// needs to be in the [R || S || V] format where V is 0 or 1.
|
||||
func (s EIP155Signer) WithSignature(tx *Transaction, sig []byte) (*Transaction, error) {
|
||||
if len(sig) != 65 {
|
||||
panic(fmt.Sprintf("wrong size for signature: got %d, want 65", len(sig)))
|
||||
func (s EIP155Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) {
|
||||
R, S, V, err = HomesteadSigner{}.SignatureValues(tx, sig)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
cpy := &Transaction{data: tx.data}
|
||||
cpy.data.R = new(big.Int).SetBytes(sig[:32])
|
||||
cpy.data.S = new(big.Int).SetBytes(sig[32:64])
|
||||
cpy.data.V = new(big.Int).SetBytes([]byte{sig[64]})
|
||||
if s.chainId.Sign() != 0 {
|
||||
cpy.data.V = big.NewInt(int64(sig[64] + 35))
|
||||
cpy.data.V.Add(cpy.data.V, s.chainIdMul)
|
||||
V = big.NewInt(int64(sig[64] + 35))
|
||||
V.Add(V, s.chainIdMul)
|
||||
}
|
||||
return cpy, nil
|
||||
return R, S, V, nil
|
||||
}
|
||||
|
||||
// Hash returns the hash to be signed by the sender.
|
||||
|
@ -205,44 +173,14 @@ func (s HomesteadSigner) Equal(s2 Signer) bool {
|
|||
return ok
|
||||
}
|
||||
|
||||
// WithSignature returns a new transaction with the given signature. This signature
|
||||
// SignatureValues returns signature values. This signature
|
||||
// needs to be in the [R || S || V] format where V is 0 or 1.
|
||||
func (hs HomesteadSigner) WithSignature(tx *Transaction, sig []byte) (*Transaction, error) {
|
||||
if len(sig) != 65 {
|
||||
panic(fmt.Sprintf("wrong size for snature: got %d, want 65", len(sig)))
|
||||
}
|
||||
cpy := &Transaction{data: tx.data}
|
||||
cpy.data.R = new(big.Int).SetBytes(sig[:32])
|
||||
cpy.data.S = new(big.Int).SetBytes(sig[32:64])
|
||||
cpy.data.V = new(big.Int).SetBytes([]byte{sig[64] + 27})
|
||||
return cpy, nil
|
||||
func (hs HomesteadSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) {
|
||||
return hs.FrontierSigner.SignatureValues(tx, sig)
|
||||
}
|
||||
|
||||
func (hs HomesteadSigner) PublicKey(tx *Transaction) ([]byte, error) {
|
||||
if tx.data.V.BitLen() > 8 {
|
||||
return nil, ErrInvalidSig
|
||||
}
|
||||
V := byte(tx.data.V.Uint64() - 27)
|
||||
if !crypto.ValidateSignatureValues(V, tx.data.R, tx.data.S, true) {
|
||||
return nil, ErrInvalidSig
|
||||
}
|
||||
// encode the snature in uncompressed format
|
||||
r, s := tx.data.R.Bytes(), tx.data.S.Bytes()
|
||||
sig := make([]byte, 65)
|
||||
copy(sig[32-len(r):32], r)
|
||||
copy(sig[64-len(s):64], s)
|
||||
sig[64] = V
|
||||
|
||||
// recover the public key from the snature
|
||||
hash := hs.Hash(tx)
|
||||
pub, err := crypto.Ecrecover(hash[:], sig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pub) == 0 || pub[0] != 4 {
|
||||
return nil, errors.New("invalid public key")
|
||||
}
|
||||
return pub, nil
|
||||
func (hs HomesteadSigner) Sender(tx *Transaction) (common.Address, error) {
|
||||
return recoverPlain(hs.Hash(tx), tx.data.R, tx.data.S, tx.data.V, true)
|
||||
}
|
||||
|
||||
type FrontierSigner struct{}
|
||||
|
@ -252,20 +190,19 @@ func (s FrontierSigner) Equal(s2 Signer) bool {
|
|||
return ok
|
||||
}
|
||||
|
||||
// WithSignature returns a new transaction with the given signature. This signature
|
||||
// SignatureValues returns signature values. This signature
|
||||
// needs to be in the [R || S || V] format where V is 0 or 1.
|
||||
func (fs FrontierSigner) WithSignature(tx *Transaction, sig []byte) (*Transaction, error) {
|
||||
func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) {
|
||||
if len(sig) != 65 {
|
||||
panic(fmt.Sprintf("wrong size for snature: got %d, want 65", len(sig)))
|
||||
panic(fmt.Sprintf("wrong size for signature: got %d, want 65", len(sig)))
|
||||
}
|
||||
cpy := &Transaction{data: tx.data}
|
||||
cpy.data.R = new(big.Int).SetBytes(sig[:32])
|
||||
cpy.data.S = new(big.Int).SetBytes(sig[32:64])
|
||||
cpy.data.V = new(big.Int).SetBytes([]byte{sig[64] + 27})
|
||||
return cpy, nil
|
||||
r = new(big.Int).SetBytes(sig[:32])
|
||||
s = new(big.Int).SetBytes(sig[32:64])
|
||||
v = new(big.Int).SetBytes([]byte{sig[64] + 27})
|
||||
return r, s, v, nil
|
||||
}
|
||||
|
||||
// Hash returns the hash to be sned by the sender.
|
||||
// Hash returns the hash to be signed by the sender.
|
||||
// It does not uniquely identify the transaction.
|
||||
func (fs FrontierSigner) Hash(tx *Transaction) common.Hash {
|
||||
return rlpHash([]interface{}{
|
||||
|
@ -278,32 +215,35 @@ func (fs FrontierSigner) Hash(tx *Transaction) common.Hash {
|
|||
})
|
||||
}
|
||||
|
||||
func (fs FrontierSigner) PublicKey(tx *Transaction) ([]byte, error) {
|
||||
if tx.data.V.BitLen() > 8 {
|
||||
return nil, ErrInvalidSig
|
||||
}
|
||||
func (fs FrontierSigner) Sender(tx *Transaction) (common.Address, error) {
|
||||
return recoverPlain(fs.Hash(tx), tx.data.R, tx.data.S, tx.data.V, false)
|
||||
}
|
||||
|
||||
V := byte(tx.data.V.Uint64() - 27)
|
||||
if !crypto.ValidateSignatureValues(V, tx.data.R, tx.data.S, false) {
|
||||
return nil, ErrInvalidSig
|
||||
func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (common.Address, error) {
if Vb.BitLen() > 8 {
return common.Address{}, ErrInvalidSig
}
V := byte(Vb.Uint64() - 27)
if !crypto.ValidateSignatureValues(V, R, S, homestead) {
return common.Address{}, ErrInvalidSig
}
// encode the snature in uncompressed format
r, s := tx.data.R.Bytes(), tx.data.S.Bytes()
r, s := R.Bytes(), S.Bytes()
sig := make([]byte, 65)
copy(sig[32-len(r):32], r)
copy(sig[64-len(s):64], s)
sig[64] = V

// recover the public key from the snature
hash := fs.Hash(tx)
pub, err := crypto.Ecrecover(hash[:], sig)
pub, err := crypto.Ecrecover(sighash[:], sig)
if err != nil {
return nil, err
return common.Address{}, err
}
if len(pub) == 0 || pub[0] != 4 {
return nil, errors.New("invalid public key")
return common.Address{}, errors.New("invalid public key")
}
return pub, nil
var addr common.Address
copy(addr[:], crypto.Keccak256(pub[1:])[12:])
return addr, nil
}
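For illustration, a minimal sketch of how the Sender/recoverPlain path above is normally exercised: sign a transaction with an EIP155 signer, then derive the sender again. It assumes the vendored 1.7.x transaction API, where NewTransaction still takes *big.Int gas values.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	signer := types.NewEIP155Signer(big.NewInt(1)) // chain id 1

	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), big.NewInt(21000), big.NewInt(1), nil)
	signed, err := types.SignTx(tx, signer, key)
	if err != nil {
		panic(err)
	}

	// Sender re-derives the address from (R, S, V) via the recoverPlain path above.
	from, err := types.Sender(signer, signed)
	if err != nil {
		panic(err)
	}
	fmt.Println(from == crypto.PubkeyToAddress(key.PublicKey)) // true
}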
|
||||
|
||||
// deriveChainId derives the chain id from the given v parameter
|
||||
|
|
|
@ -137,12 +137,17 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
|
|||
// to be uint256. Practically much less so feasible.
|
||||
pc = uint64(0) // program counter
|
||||
cost uint64
|
||||
// copies used by tracer
|
||||
stackCopy = newstack() // stackCopy needed for Tracer since stack is mutated by 63/64 gas rule
|
||||
pcCopy uint64 // needed for the deferred Tracer
|
||||
gasCopy uint64 // for Tracer to log gas remaining before execution
|
||||
logged bool // deferred Tracer should ignore already logged steps
|
||||
)
|
||||
contract.Input = input
|
||||
|
||||
defer func() {
|
||||
if err != nil && in.cfg.Debug {
|
||||
in.cfg.Tracer.CaptureState(in.evm, pc, op, contract.Gas, cost, mem, stack, contract, in.evm.depth, err)
|
||||
if err != nil && !logged && in.cfg.Debug {
|
||||
in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, mem, stackCopy, contract, in.evm.depth, err)
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -154,22 +159,29 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
|
|||
// Get the memory location of pc
|
||||
op = contract.GetOp(pc)
|
||||
|
||||
// get the operation from the jump table matching the opcode
|
||||
operation := in.cfg.JumpTable[op]
|
||||
if err := in.enforceRestrictions(op, operation, stack); err != nil {
|
||||
return nil, err
|
||||
if in.cfg.Debug {
|
||||
logged = false
|
||||
pcCopy = uint64(pc)
|
||||
gasCopy = uint64(contract.Gas)
|
||||
stackCopy = newstack()
|
||||
for _, val := range stack.data {
|
||||
stackCopy.push(val)
|
||||
}
|
||||
}
|
||||
|
||||
// if the op is invalid abort the process and return an error
|
||||
// Get the operation from the jump table matching the opcode and validate the
|
||||
// stack and make sure there enough stack items available to perform the operation
|
||||
operation := in.cfg.JumpTable[op]
|
||||
if !operation.valid {
|
||||
return nil, fmt.Errorf("invalid opcode 0x%x", int(op))
|
||||
}
|
||||
|
||||
// validate the stack and make sure there enough stack items available
|
||||
// to perform the operation
|
||||
if err := operation.validateStack(stack); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If the operation is valid, enforce and write restrictions
|
||||
if err := in.enforceRestrictions(op, operation, stack); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var memorySize uint64
|
||||
// calculate the new memory size and expand the memory to fit
|
||||
|
@ -199,7 +211,8 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
|
|||
}
|
||||
|
||||
if in.cfg.Debug {
|
||||
in.cfg.Tracer.CaptureState(in.evm, pc, op, contract.Gas, cost, mem, stack, contract, in.evm.depth, err)
|
||||
in.cfg.Tracer.CaptureState(in.evm, pc, op, gasCopy, cost, mem, stackCopy, contract, in.evm.depth, err)
|
||||
logged = true
|
||||
}
|
||||
|
||||
// execute the operation
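As a hedged illustration of the Debug/Tracer path above (the pc/gas/stack copies exist so CaptureState always sees pre-execution values), the built-in StructLogger can be attached through the core/vm/runtime helper. The bytecode and default config below are only an example of driving the tracer, not part of this change.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

func main() {
	// Collects one StructLog per step from CaptureState.
	logger := vm.NewStructLogger(nil)

	// PUSH1 0x01, PUSH1 0x02, ADD, STOP
	code := []byte{0x60, 0x01, 0x60, 0x02, 0x01, 0x00}

	if _, _, err := runtime.Execute(code, nil, &runtime.Config{
		EVMConfig: vm.Config{Debug: true, Tracer: logger},
	}); err != nil {
		panic(err)
	}
	for _, step := range logger.StructLogs() {
		fmt.Printf("pc=%d op=%v gas=%d\n", step.Pc, step.Op, step.Gas)
	}
}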
|
||||
|
|
|
@ -51,7 +51,7 @@ type PublicEthereumAPI struct {
|
|||
e *Ethereum
|
||||
}
|
||||
|
||||
// NewPublicEthereumAPI creates a new Etheruem protocol API for full nodes.
|
||||
// NewPublicEthereumAPI creates a new Ethereum protocol API for full nodes.
|
||||
func NewPublicEthereumAPI(e *Ethereum) *PublicEthereumAPI {
|
||||
return &PublicEthereumAPI{e}
|
||||
}
|
||||
|
@ -205,7 +205,7 @@ func (api *PrivateMinerAPI) GetHashrate() uint64 {
|
|||
return uint64(api.e.miner.HashRate())
|
||||
}
|
||||
|
||||
// PrivateAdminAPI is the collection of Etheruem full node-related APIs
|
||||
// PrivateAdminAPI is the collection of Ethereum full node-related APIs
|
||||
// exposed over the private admin endpoint.
|
||||
type PrivateAdminAPI struct {
|
||||
eth *Ethereum
|
||||
|
@ -298,7 +298,7 @@ func (api *PrivateAdminAPI) ImportChain(file string) (bool, error) {
|
|||
return true, nil
|
||||
}
|
||||
|
||||
// PublicDebugAPI is the collection of Etheruem full node APIs exposed
|
||||
// PublicDebugAPI is the collection of Ethereum full node APIs exposed
|
||||
// over the public debugging endpoint.
|
||||
type PublicDebugAPI struct {
|
||||
eth *Ethereum
|
||||
|
@ -335,7 +335,7 @@ func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error
|
|||
return stateDb.RawDump(), nil
|
||||
}
|
||||
|
||||
// PrivateDebugAPI is the collection of Etheruem full node APIs exposed over
|
||||
// PrivateDebugAPI is the collection of Ethereum full node APIs exposed over
|
||||
// the private debugging endpoint.
|
||||
type PrivateDebugAPI struct {
|
||||
config *params.ChainConfig
|
||||
|
@ -523,8 +523,7 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, txHash common.
|
|||
|
||||
// Run the transaction with tracing enabled.
|
||||
vmenv := vm.NewEVM(context, statedb, api.config, vm.Config{Debug: true, Tracer: tracer})
|
||||
// TODO utilize failed flag
|
||||
ret, gas, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
ret, gas, failed, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tracing failed: %v", err)
|
||||
}
|
||||
|
@ -532,6 +531,7 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, txHash common.
|
|||
case *vm.StructLogger:
|
||||
return ðapi.ExecutionResult{
|
||||
Gas: gas,
|
||||
Failed: failed,
|
||||
ReturnValue: fmt.Sprintf("%x", ret),
|
||||
StructLogs: ethapi.FormatLogs(tracer.StructLogs()),
|
||||
}, nil
|
||||
|
|
|
@ -327,7 +327,7 @@ func (s *Ethereum) StartMining(local bool) error {
|
|||
wallet, err := s.accountManager.Find(accounts.Account{Address: eb})
|
||||
if wallet == nil || err != nil {
|
||||
log.Error("Etherbase account unavailable locally", "err", err)
|
||||
return fmt.Errorf("singer missing: %v", err)
|
||||
return fmt.Errorf("signer missing: %v", err)
|
||||
}
|
||||
clique.Authorize(eb, wallet.SignHash)
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ type ContractBackend struct {
|
|||
}
|
||||
|
||||
// NewContractBackend creates a new native contract backend using an existing
|
||||
// Etheruem object.
|
||||
// Ethereum object.
|
||||
func NewContractBackend(apiBackend ethapi.Backend) *ContractBackend {
|
||||
return &ContractBackend{
|
||||
eapi: ethapi.NewPublicEthereumAPI(apiBackend),
|
||||
|
|
160
vendor/github.com/ethereum/go-ethereum/eth/downloader/fakepeer.go
generated
vendored
Normal file
|
@ -0,0 +1,160 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package downloader
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
)
|
||||
|
||||
// FakePeer is a mock downloader peer that operates on a local database instance
|
||||
// instead of being an actual live node. It's useful for testing and to implement
|
||||
// sync commands from an existing local database.
|
||||
type FakePeer struct {
|
||||
id string
|
||||
db ethdb.Database
|
||||
hc *core.HeaderChain
|
||||
dl *Downloader
|
||||
}
|
||||
|
||||
// NewFakePeer creates a new mock downloader peer with the given data sources.
|
||||
func NewFakePeer(id string, db ethdb.Database, hc *core.HeaderChain, dl *Downloader) *FakePeer {
|
||||
return &FakePeer{id: id, db: db, hc: hc, dl: dl}
|
||||
}
|
||||
|
||||
// Head implements downloader.Peer, returning the current head hash and number
|
||||
// of the best known header.
|
||||
func (p *FakePeer) Head() (common.Hash, *big.Int) {
|
||||
header := p.hc.CurrentHeader()
|
||||
return header.Hash(), header.Number
|
||||
}
|
||||
|
||||
// RequestHeadersByHash implements downloader.Peer, returning a batch of headers
|
||||
// defined by the origin hash and the associated query parameters.
|
||||
func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
|
||||
var (
|
||||
headers []*types.Header
|
||||
unknown bool
|
||||
)
|
||||
for !unknown && len(headers) < amount {
|
||||
origin := p.hc.GetHeaderByHash(hash)
|
||||
if origin == nil {
|
||||
break
|
||||
}
|
||||
number := origin.Number.Uint64()
|
||||
headers = append(headers, origin)
|
||||
if reverse {
|
||||
for i := 0; i < int(skip)+1; i++ {
|
||||
if header := p.hc.GetHeader(hash, number); header != nil {
|
||||
hash = header.ParentHash
|
||||
number--
|
||||
} else {
|
||||
unknown = true
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
var (
|
||||
current = origin.Number.Uint64()
|
||||
next = current + uint64(skip) + 1
|
||||
)
|
||||
if header := p.hc.GetHeaderByNumber(next); header != nil {
|
||||
if p.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
|
||||
hash = header.Hash()
|
||||
} else {
|
||||
unknown = true
|
||||
}
|
||||
} else {
|
||||
unknown = true
|
||||
}
|
||||
}
|
||||
}
|
||||
p.dl.DeliverHeaders(p.id, headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
|
||||
// defined by the origin number and the associated query parameters.
|
||||
func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
|
||||
var (
|
||||
headers []*types.Header
|
||||
unknown bool
|
||||
)
|
||||
for !unknown && len(headers) < amount {
|
||||
origin := p.hc.GetHeaderByNumber(number)
|
||||
if origin == nil {
|
||||
break
|
||||
}
|
||||
if reverse {
|
||||
if number >= uint64(skip+1) {
|
||||
number -= uint64(skip + 1)
|
||||
} else {
|
||||
unknown = true
|
||||
}
|
||||
} else {
|
||||
number += uint64(skip + 1)
|
||||
}
|
||||
headers = append(headers, origin)
|
||||
}
|
||||
p.dl.DeliverHeaders(p.id, headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequestBodies implements downloader.Peer, returning a batch of block bodies
|
||||
// corresponding to the specified block hashes.
|
||||
func (p *FakePeer) RequestBodies(hashes []common.Hash) error {
|
||||
var (
|
||||
txs [][]*types.Transaction
|
||||
uncles [][]*types.Header
|
||||
)
|
||||
for _, hash := range hashes {
|
||||
block := core.GetBlock(p.db, hash, p.hc.GetBlockNumber(hash))
|
||||
|
||||
txs = append(txs, block.Transactions())
|
||||
uncles = append(uncles, block.Uncles())
|
||||
}
|
||||
p.dl.DeliverBodies(p.id, txs, uncles)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequestReceipts implements downloader.Peer, returning a batch of transaction
|
||||
// receipts corresponding to the specified block hashes.
|
||||
func (p *FakePeer) RequestReceipts(hashes []common.Hash) error {
|
||||
var receipts [][]*types.Receipt
|
||||
for _, hash := range hashes {
|
||||
receipts = append(receipts, core.GetBlockReceipts(p.db, hash, p.hc.GetBlockNumber(hash)))
|
||||
}
|
||||
p.dl.DeliverReceipts(p.id, receipts)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequestNodeData implements downloader.Peer, returning a batch of state trie
|
||||
// nodes corresponding to the specified trie hashes.
|
||||
func (p *FakePeer) RequestNodeData(hashes []common.Hash) error {
|
||||
var data [][]byte
|
||||
for _, hash := range hashes {
|
||||
if entry, err := p.db.Get(hash.Bytes()); err == nil {
|
||||
data = append(data, entry)
|
||||
}
|
||||
}
|
||||
p.dl.DeliverNodeData(p.id, data)
|
||||
return nil
|
||||
}
|
|
@ -83,6 +83,7 @@ type announce struct {
|
|||
|
||||
// headerFilterTask represents a batch of headers needing fetcher filtering.
|
||||
type headerFilterTask struct {
|
||||
peer string // The source peer of block headers
|
||||
headers []*types.Header // Collection of headers to filter
|
||||
time time.Time // Arrival time of the headers
|
||||
}
|
||||
|
@ -90,6 +91,7 @@ type headerFilterTask struct {
|
|||
// headerFilterTask represents a batch of block bodies (transactions and uncles)
|
||||
// needing fetcher filtering.
|
||||
type bodyFilterTask struct {
|
||||
peer string // The source peer of block bodies
|
||||
transactions [][]*types.Transaction // Collection of transactions per block bodies
|
||||
uncles [][]*types.Header // Collection of uncles per block bodies
|
||||
time time.Time // Arrival time of the blocks' contents
|
||||
|
@ -218,8 +220,8 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
|
|||
|
||||
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
|
||||
// returning those that should be handled differently.
|
||||
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
|
||||
log.Trace("Filtering headers", "headers", len(headers))
|
||||
func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
|
||||
log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
|
||||
|
||||
// Send the filter channel to the fetcher
|
||||
filter := make(chan *headerFilterTask)
|
||||
|
@ -231,7 +233,7 @@ func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*type
|
|||
}
|
||||
// Request the filtering of the header list
|
||||
select {
|
||||
case filter <- &headerFilterTask{headers: headers, time: time}:
|
||||
case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
|
||||
case <-f.quit:
|
||||
return nil
|
||||
}
|
||||
|
@ -246,8 +248,8 @@ func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*type
|
|||
|
||||
// FilterBodies extracts all the block bodies that were explicitly requested by
|
||||
// the fetcher, returning those that should be handled differently.
|
||||
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
|
||||
log.Trace("Filtering bodies", "txs", len(transactions), "uncles", len(uncles))
|
||||
func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
|
||||
log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))
|
||||
|
||||
// Send the filter channel to the fetcher
|
||||
filter := make(chan *bodyFilterTask)
|
||||
|
@ -259,7 +261,7 @@ func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*
|
|||
}
|
||||
// Request the filtering of the body list
|
||||
select {
|
||||
case filter <- &bodyFilterTask{transactions: transactions, uncles: uncles, time: time}:
|
||||
case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
|
||||
case <-f.quit:
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -444,7 +446,7 @@ func (f *Fetcher) loop() {
|
|||
hash := header.Hash()
|
||||
|
||||
// Filter fetcher-requested headers from other synchronisation algorithms
|
||||
if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
|
||||
if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
|
||||
// If the delivered header does not match the promised number, drop the announcer
|
||||
if header.Number.Uint64() != announce.number {
|
||||
log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
|
||||
|
@ -523,7 +525,7 @@ func (f *Fetcher) loop() {
|
|||
txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
|
||||
uncleHash := types.CalcUncleHash(task.uncles[i])
|
||||
|
||||
if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash {
|
||||
if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
|
||||
// Mark the body matched, reassemble if still unknown
|
||||
matched = true
|
||||
|
||||
|
|
|
@ -498,7 +498,6 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
|
|||
switch topic := t.(type) {
|
||||
case nil:
|
||||
// ignore topic when matching logs
|
||||
args.Topics[i] = []common.Hash{{}}
|
||||
|
||||
case string:
|
||||
// match specific topic
|
||||
|
@ -507,12 +506,16 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
|
|||
return err
|
||||
}
|
||||
args.Topics[i] = []common.Hash{top}
|
||||
|
||||
case []interface{}:
|
||||
// or case e.g. [null, "topic0", "topic1"]
|
||||
for _, rawTopic := range topic {
|
||||
if rawTopic == nil {
|
||||
args.Topics[i] = append(args.Topics[i], common.Hash{})
|
||||
} else if topic, ok := rawTopic.(string); ok {
|
||||
// null component, match all
|
||||
args.Topics[i] = nil
|
||||
break
|
||||
}
|
||||
if topic, ok := rawTopic.(string); ok {
|
||||
parsed, err := decodeTopic(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -60,7 +60,9 @@ type Filter struct {
|
|||
// New creates a new filter which uses a bloom filter on blocks to figure out whether
|
||||
// a particular block is interesting or not.
|
||||
func New(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
|
||||
// Flatten the address and topic filter clauses into a single filter system
|
||||
// Flatten the address and topic filter clauses into a single bloombits filter
|
||||
// system. Since the bloombits are not positional, nil topics are permitted,
|
||||
// which get flattened into a nil byte slice.
|
||||
var filters [][][]byte
|
||||
if len(addresses) > 0 {
|
||||
filter := make([][]byte, len(addresses))
|
||||
|
@ -235,32 +237,24 @@ Logs:
|
|||
if len(addresses) > 0 && !includes(addresses, log.Address) {
|
||||
continue
|
||||
}
|
||||
|
||||
logTopics := make([]common.Hash, len(topics))
|
||||
copy(logTopics, log.Topics)
|
||||
|
||||
// If the to filtered topics is greater than the amount of topics in logs, skip.
|
||||
if len(topics) > len(log.Topics) {
|
||||
continue Logs
|
||||
}
|
||||
|
||||
for i, topics := range topics {
|
||||
var match bool
|
||||
match := len(topics) == 0 // empty rule set == wildcard
|
||||
for _, topic := range topics {
|
||||
// common.Hash{} is a match all (wildcard)
|
||||
if (topic == common.Hash{}) || log.Topics[i] == topic {
|
||||
if log.Topics[i] == topic {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !match {
|
||||
continue Logs
|
||||
}
|
||||
}
|
||||
ret = append(ret, log)
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
|
@ -273,16 +267,15 @@ func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]commo
|
|||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !included {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, sub := range topics {
|
||||
var included bool
|
||||
included := len(sub) == 0 // empty rule set == wildcard
|
||||
for _, topic := range sub {
|
||||
if (topic == common.Hash{}) || types.BloomLookup(bloom, topic) {
|
||||
if types.BloomLookup(bloom, topic) {
|
||||
included = true
|
||||
break
|
||||
}
|
||||
|
@ -291,6 +284,5 @@ func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]commo
|
|||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
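A hedged illustration of the matching rules the bloom/topic filtering above implements: each position in a topic filter is an OR-list, and an empty (nil) list at a position is a wildcard. The address and hashes below are placeholders.

package main

import (
	"math/big"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// keccak256("Transfer(address,address,uint256)"), the usual ERC20 event signature.
	transfer := common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")

	_ = ethereum.FilterQuery{
		FromBlock: big.NewInt(0),
		ToBlock:   nil, // nil means latest
		Addresses: []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000001")},
		Topics: [][]common.Hash{
			{transfer}, // position 0: must match the Transfer signature
			nil,        // position 1: empty rule set, i.e. wildcard for the indexed sender
		},
	}
}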
|
||||
|
|
|
@ -212,7 +212,6 @@ func (es *EventSystem) subscribeMinedPendingLogs(crit FilterCriteria, logs chan
|
|||
installed: make(chan struct{}),
|
||||
err: make(chan error),
|
||||
}
|
||||
|
||||
return es.subscribe(sub)
|
||||
}
|
||||
|
||||
|
@ -230,7 +229,6 @@ func (es *EventSystem) subscribeLogs(crit FilterCriteria, logs chan []*types.Log
|
|||
installed: make(chan struct{}),
|
||||
err: make(chan error),
|
||||
}
|
||||
|
||||
return es.subscribe(sub)
|
||||
}
|
||||
|
||||
|
@ -248,7 +246,6 @@ func (es *EventSystem) subscribePendingLogs(crit FilterCriteria, logs chan []*ty
|
|||
installed: make(chan struct{}),
|
||||
err: make(chan error),
|
||||
}
|
||||
|
||||
return es.subscribe(sub)
|
||||
}
|
||||
|
||||
|
@ -265,7 +262,6 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
|
|||
installed: make(chan struct{}),
|
||||
err: make(chan error),
|
||||
}
|
||||
|
||||
return es.subscribe(sub)
|
||||
}
|
||||
|
||||
|
@ -282,7 +278,6 @@ func (es *EventSystem) SubscribePendingTxEvents(hashes chan common.Hash) *Subscr
|
|||
installed: make(chan struct{}),
|
||||
err: make(chan error),
|
||||
}
|
||||
|
||||
return es.subscribe(sub)
|
||||
}
|
||||
|
||||
|
|
|
@ -450,7 +450,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||
return nil
|
||||
}
|
||||
// Irrelevant of the fork checks, send the header to the fetcher just in case
|
||||
headers = pm.fetcher.FilterHeaders(headers, time.Now())
|
||||
headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
|
||||
}
|
||||
if len(headers) > 0 || !filter {
|
||||
err := pm.downloader.DeliverHeaders(p.id, headers)
|
||||
|
@ -503,7 +503,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
|||
// Filter out any explicitly requested bodies, deliver the rest to the downloader
|
||||
filter := len(trasactions) > 0 || len(uncles) > 0
|
||||
if filter {
|
||||
trasactions, uncles = pm.fetcher.FilterBodies(trasactions, uncles, time.Now())
|
||||
trasactions, uncles = pm.fetcher.FilterBodies(p.id, trasactions, uncles, time.Now())
|
||||
}
|
||||
if len(trasactions) > 0 || len(uncles) > 0 || !filter {
|
||||
err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
|
||||
|
|
|
@ -20,6 +20,7 @@ package ethclient
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
|
@ -70,9 +71,9 @@ func (ec *Client) BlockByNumber(ctx context.Context, number *big.Int) (*types.Bl
|
|||
}
|
||||
|
||||
type rpcBlock struct {
|
||||
Hash common.Hash `json:"hash"`
|
||||
Transactions []*types.Transaction `json:"transactions"`
|
||||
UncleHashes []common.Hash `json:"uncles"`
|
||||
Hash common.Hash `json:"hash"`
|
||||
Transactions []rpcTransaction `json:"transactions"`
|
||||
UncleHashes []common.Hash `json:"uncles"`
|
||||
}
|
||||
|
||||
func (ec *Client) getBlock(ctx context.Context, method string, args ...interface{}) (*types.Block, error) {
|
||||
|
@ -129,7 +130,13 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface
|
|||
}
|
||||
}
|
||||
}
|
||||
return types.NewBlockWithHeader(head).WithBody(body.Transactions, uncles), nil
|
||||
// Fill the sender cache of transactions in the block.
|
||||
txs := make([]*types.Transaction, len(body.Transactions))
|
||||
for i, tx := range body.Transactions {
|
||||
setSenderFromServer(tx.tx, tx.From, body.Hash)
|
||||
txs[i] = tx.tx
|
||||
}
|
||||
return types.NewBlockWithHeader(head).WithBody(txs, uncles), nil
|
||||
}
|
||||
|
||||
// HeaderByHash returns the block header with the given hash.
|
||||
|
@ -153,25 +160,62 @@ func (ec *Client) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
|
|||
return head, err
|
||||
}
|
||||
|
||||
type rpcTransaction struct {
|
||||
tx *types.Transaction
|
||||
txExtraInfo
|
||||
}
|
||||
|
||||
type txExtraInfo struct {
|
||||
BlockNumber *string
|
||||
BlockHash common.Hash
|
||||
From common.Address
|
||||
}
|
||||
|
||||
func (tx *rpcTransaction) UnmarshalJSON(msg []byte) error {
|
||||
if err := json.Unmarshal(msg, &tx.tx); err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(msg, &tx.txExtraInfo)
|
||||
}
|
||||
|
||||
// TransactionByHash returns the transaction with the given hash.
|
||||
func (ec *Client) TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error) {
|
||||
var raw json.RawMessage
|
||||
err = ec.c.CallContext(ctx, &raw, "eth_getTransactionByHash", hash)
|
||||
var json *rpcTransaction
|
||||
err = ec.c.CallContext(ctx, &json, "eth_getTransactionByHash", hash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
} else if len(raw) == 0 {
|
||||
} else if json == nil {
|
||||
return nil, false, ethereum.NotFound
|
||||
}
|
||||
if err := json.Unmarshal(raw, &tx); err != nil {
|
||||
return nil, false, err
|
||||
} else if _, r, _ := tx.RawSignatureValues(); r == nil {
|
||||
} else if _, r, _ := json.tx.RawSignatureValues(); r == nil {
|
||||
return nil, false, fmt.Errorf("server returned transaction without signature")
|
||||
}
|
||||
var block struct{ BlockNumber *string }
|
||||
if err := json.Unmarshal(raw, &block); err != nil {
|
||||
return nil, false, err
|
||||
setSenderFromServer(json.tx, json.From, json.BlockHash)
|
||||
return json.tx, json.BlockNumber == nil, nil
|
||||
}
|
||||
|
||||
// TransactionSender returns the sender address of the given transaction. The transaction
|
||||
// must be known to the remote node and included in the blockchain at the given block and
|
||||
// index. The sender is the one derived by the protocol at the time of inclusion.
|
||||
//
|
||||
// There is a fast-path for transactions retrieved by TransactionByHash and
|
||||
// TransactionInBlock. Getting their sender address can be done without an RPC interaction.
|
||||
func (ec *Client) TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) {
|
||||
// Try to load the address from the cache.
|
||||
sender, err := types.Sender(&senderFromServer{blockhash: block}, tx)
|
||||
if err == nil {
|
||||
return sender, nil
|
||||
}
|
||||
return tx, block.BlockNumber == nil, nil
|
||||
var meta struct {
|
||||
Hash common.Hash
|
||||
From common.Address
|
||||
}
|
||||
if err = ec.c.CallContext(ctx, &meta, "eth_getTransactionByBlockHashAndIndex", block, hexutil.Uint64(index)); err != nil {
|
||||
return common.Address{}, err
|
||||
}
|
||||
if meta.Hash == (common.Hash{}) || meta.Hash != tx.Hash() {
|
||||
return common.Address{}, errors.New("wrong inclusion block/index")
|
||||
}
|
||||
return meta.From, nil
|
||||
}
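A hedged usage sketch of the new TransactionSender fast path: fetch a transaction with TransactionInBlock and resolve its sender. Because the diff caches the server-reported sender via setSenderFromServer, the lookup below normally needs no extra RPC round trip. The endpoint URL and block hash are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder block hash; substitute a real one from your chain.
	blockHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

	tx, err := client.TransactionInBlock(context.Background(), blockHash, 0)
	if err != nil {
		log.Fatal(err)
	}
	// Normally answered from the sender cache, so no additional request is made.
	from, err := client.TransactionSender(context.Background(), tx, blockHash, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("sender:", from.Hex())
}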
|
||||
|
||||
// TransactionCount returns the total number of transactions in the given block.
|
||||
|
@ -183,16 +227,17 @@ func (ec *Client) TransactionCount(ctx context.Context, blockHash common.Hash) (
|
|||
|
||||
// TransactionInBlock returns a single transaction at index in the given block.
|
||||
func (ec *Client) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {
|
||||
var tx *types.Transaction
|
||||
err := ec.c.CallContext(ctx, &tx, "eth_getTransactionByBlockHashAndIndex", blockHash, hexutil.Uint64(index))
|
||||
var json *rpcTransaction
|
||||
err := ec.c.CallContext(ctx, &json, "eth_getTransactionByBlockHashAndIndex", blockHash, hexutil.Uint64(index))
|
||||
if err == nil {
|
||||
if tx == nil {
|
||||
if json == nil {
|
||||
return nil, ethereum.NotFound
|
||||
} else if _, r, _ := tx.RawSignatureValues(); r == nil {
|
||||
} else if _, r, _ := json.tx.RawSignatureValues(); r == nil {
|
||||
return nil, fmt.Errorf("server returned transaction without signature")
|
||||
}
|
||||
}
|
||||
return tx, err
|
||||
setSenderFromServer(json.tx, json.From, json.BlockHash)
|
||||
return json.tx, err
|
||||
}
|
||||
|
||||
// TransactionReceipt returns the receipt of a transaction by transaction hash.
|
||||
|
|
|
@ -0,0 +1,59 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethclient
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// senderFromServer is a types.Signer that remembers the sender address returned by the RPC
|
||||
// server. It is stored in the transaction's sender address cache to avoid an additional
|
||||
// request in TransactionSender.
|
||||
type senderFromServer struct {
|
||||
addr common.Address
|
||||
blockhash common.Hash
|
||||
}
|
||||
|
||||
var errNotCached = errors.New("sender not cached")
|
||||
|
||||
func setSenderFromServer(tx *types.Transaction, addr common.Address, block common.Hash) {
|
||||
// Use types.Sender for side-effect to store our signer into the cache.
|
||||
types.Sender(&senderFromServer{addr, block}, tx)
|
||||
}
|
||||
|
||||
func (s *senderFromServer) Equal(other types.Signer) bool {
|
||||
os, ok := other.(*senderFromServer)
|
||||
return ok && os.blockhash == s.blockhash
|
||||
}
|
||||
|
||||
func (s *senderFromServer) Sender(tx *types.Transaction) (common.Address, error) {
|
||||
if s.blockhash == (common.Hash{}) {
|
||||
return common.Address{}, errNotCached
|
||||
}
|
||||
return s.addr, nil
|
||||
}
|
||||
|
||||
func (s *senderFromServer) Hash(tx *types.Transaction) common.Hash {
|
||||
panic("can't sign with senderFromServer")
|
||||
}
|
||||
func (s *senderFromServer) SignatureValues(tx *types.Transaction, sig []byte) (R, S, V *big.Int, err error) {
|
||||
panic("can't sign with senderFromServer")
|
||||
}
|
|
@ -56,7 +56,7 @@ func (mux *TypeMux) Subscribe(types ...interface{}) *TypeMuxSubscription {
|
|||
defer mux.mutex.Unlock()
|
||||
if mux.stopped {
|
||||
// set the status to closed so that calling Unsubscribe after this
|
||||
// call will short curuit
|
||||
// call will short circuit.
|
||||
sub.closed = true
|
||||
close(sub.postC)
|
||||
} else {
|
||||
|
|
|
@ -102,7 +102,7 @@ type SyncProgress struct {
|
|||
CurrentBlock uint64 // Current block number where sync is at
|
||||
HighestBlock uint64 // Highest alleged block number in the chain
|
||||
PulledStates uint64 // Number of state trie entries already downloaded
|
||||
KnownStates uint64 // Total number os state trie entries known about
|
||||
KnownStates uint64 // Total number of state trie entries known about
|
||||
}
|
||||
|
||||
// ChainSyncReader wraps access to the node's current sync status. If there's no
|
||||
|
@ -129,7 +129,7 @@ type ContractCaller interface {
|
|||
CallContract(ctx context.Context, call CallMsg, blockNumber *big.Int) ([]byte, error)
|
||||
}
|
||||
|
||||
// FilterQuery contains options for contact log filtering.
|
||||
// FilterQuery contains options for contract log filtering.
|
||||
type FilterQuery struct {
|
||||
FromBlock *big.Int // beginning of the queried range, nil means genesis block
|
||||
ToBlock *big.Int // end of the range, nil means latest block
|
||||
|
|
|
@ -54,7 +54,7 @@ type PublicEthereumAPI struct {
|
|||
b Backend
|
||||
}
|
||||
|
||||
// NewPublicEthereumAPI creates a new Etheruem protocol API.
|
||||
// NewPublicEthereumAPI creates a new Ethereum protocol API.
|
||||
func NewPublicEthereumAPI(b Backend) *PublicEthereumAPI {
|
||||
return &PublicEthereumAPI{b}
|
||||
}
|
||||
|
@ -462,7 +462,7 @@ type PublicBlockChainAPI struct {
|
|||
b Backend
|
||||
}
|
||||
|
||||
// NewPublicBlockChainAPI creates a new Etheruem blockchain API.
|
||||
// NewPublicBlockChainAPI creates a new Ethereum blockchain API.
|
||||
func NewPublicBlockChainAPI(b Backend) *PublicBlockChainAPI {
|
||||
return &PublicBlockChainAPI{b}
|
||||
}
|
||||
|
@ -594,12 +594,12 @@ type CallArgs struct {
|
|||
Data hexutil.Bytes `json:"data"`
|
||||
}
|
||||
|
||||
func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config) ([]byte, *big.Int, error) {
|
||||
func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config) ([]byte, *big.Int, bool, error) {
|
||||
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
|
||||
|
||||
state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
|
||||
if state == nil || err != nil {
|
||||
return nil, common.Big0, err
|
||||
return nil, common.Big0, false, err
|
||||
}
|
||||
// Set sender address or use a default if none specified
|
||||
addr := args.From
|
||||
|
@ -637,7 +637,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
|
|||
// Get a new instance of the EVM.
|
||||
evm, vmError, err := s.b.GetEVM(ctx, msg, state, header, vmCfg)
|
||||
if err != nil {
|
||||
return nil, common.Big0, err
|
||||
return nil, common.Big0, false, err
|
||||
}
|
||||
// Wait for the context to be done and cancel the evm. Even if the
|
||||
// EVM has finished, cancelling may be done (repeatedly)
|
||||
|
@ -649,26 +649,28 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
|
|||
// Setup the gas pool (also for unmetered requests)
|
||||
// and apply the message.
|
||||
gp := new(core.GasPool).AddGas(math.MaxBig256)
|
||||
// TODO utilize failed flag to help gas estimation
|
||||
res, gas, _, err := core.ApplyMessage(evm, msg, gp)
|
||||
res, gas, failed, err := core.ApplyMessage(evm, msg, gp)
|
||||
if err := vmError(); err != nil {
|
||||
return nil, common.Big0, err
|
||||
return nil, common.Big0, false, err
|
||||
}
|
||||
return res, gas, err
|
||||
return res, gas, failed, err
|
||||
}
|
||||
|
||||
// Call executes the given transaction on the state for the given block number.
|
||||
// It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values.
|
||||
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
|
||||
result, _, err := s.doCall(ctx, args, blockNr, vm.Config{DisableGasMetering: true})
|
||||
result, _, _, err := s.doCall(ctx, args, blockNr, vm.Config{DisableGasMetering: true})
|
||||
return (hexutil.Bytes)(result), err
|
||||
}
|
||||
|
||||
// EstimateGas returns an estimate of the amount of gas needed to execute the given transaction.
|
||||
func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (*hexutil.Big, error) {
|
||||
// Binary search the gas requirement, as it may be higher than the amount used
|
||||
var lo, hi uint64
|
||||
if (*big.Int)(&args.Gas).Sign() != 0 {
|
||||
var (
|
||||
lo uint64 = params.TxGas - 1
|
||||
hi uint64
|
||||
)
|
||||
if (*big.Int)(&args.Gas).Uint64() >= params.TxGas {
|
||||
hi = (*big.Int)(&args.Gas).Uint64()
|
||||
} else {
|
||||
// Retrieve the current pending block to act as the gas ceiling
|
||||
|
@ -683,10 +685,10 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (*
|
|||
mid := (hi + lo) / 2
|
||||
(*big.Int)(&args.Gas).SetUint64(mid)
|
||||
|
||||
_, gas, err := s.doCall(ctx, args, rpc.PendingBlockNumber, vm.Config{})
|
||||
_, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, vm.Config{})
|
||||
|
||||
// If the transaction became invalid or used all the gas (failed), raise the gas limit
|
||||
if err != nil || gas.Cmp((*big.Int)(&args.Gas)) == 0 {
|
||||
// If the transaction became invalid or execution failed, raise the gas limit
|
||||
if err != nil || failed {
|
||||
lo = mid
|
||||
continue
|
||||
}
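The loop above is a plain binary search over the gas limit, now keyed off the failed flag rather than comparing gas used. A hedged, self-contained sketch of the same idea follows; execute is a hypothetical stand-in for doCall's failed result, not a real API.

package main

import "fmt"

// estimateGas narrows [lo, hi] to the smallest gas limit for which execute
// reports success, mirroring the loop above.
func estimateGas(lo, hi uint64, execute func(gas uint64) (failed bool)) uint64 {
	for lo+1 < hi {
		mid := (lo + hi) / 2
		if execute(mid) {
			lo = mid // still failing: need more gas
		} else {
			hi = mid // succeeded: tighten the upper bound
		}
	}
	return hi
}

func main() {
	need := uint64(53000) // pretend anything below this fails
	fmt.Println(estimateGas(21000-1, 4700000, func(gas uint64) bool { return gas < need }))
}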
|
||||
|
@ -697,10 +699,11 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (*
|
|||
}
|
||||
|
||||
// ExecutionResult groups all structured logs emitted by the EVM
|
||||
// while replaying a transaction in debug mode as well as the amount of
|
||||
// gas used and the return value
|
||||
// while replaying a transaction in debug mode as well as transaction
|
||||
// execution status, the amount of gas used and the return value
|
||||
type ExecutionResult struct {
|
||||
Gas *big.Int `json:"gas"`
|
||||
Failed bool `json:"failed"`
|
||||
ReturnValue string `json:"returnValue"`
|
||||
StructLogs []StructLogRes `json:"structLogs"`
|
||||
}
|
||||
|
@ -1005,7 +1008,6 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(hash common.Hash) (map[
|
|||
from, _ := types.Sender(signer, tx)
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"root": hexutil.Bytes(receipt.PostState),
|
||||
"blockHash": blockHash,
|
||||
"blockNumber": hexutil.Uint64(blockNumber),
|
||||
"transactionHash": hash,
|
||||
|
@ -1018,6 +1020,13 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(hash common.Hash) (map[
|
|||
"logs": receipt.Logs,
|
||||
"logsBloom": receipt.Bloom,
|
||||
}
|
||||
|
||||
// Assign receipt status or post state.
|
||||
if len(receipt.PostState) > 0 {
|
||||
fields["root"] = hexutil.Bytes(receipt.PostState)
|
||||
} else {
|
||||
fields["status"] = hexutil.Uint(receipt.Status)
|
||||
}
|
||||
if receipt.Logs == nil {
|
||||
fields["logs"] = [][]*types.Log{}
|
||||
}
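A hedged client-side sketch of what the root/status branch above means for callers: post-Byzantium receipts expose a status flag instead of an intermediate state root. The endpoint and transaction hash are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	txHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") // placeholder
	receipt, err := client.TransactionReceipt(context.Background(), txHash)
	if err != nil {
		log.Fatal(err)
	}
	// Post-Byzantium: Status is 1 on success, 0 on failure; PostState is empty.
	fmt.Println("status:", receipt.Status, "gas used:", receipt.GasUsed)
}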
|
||||
|
@ -1095,7 +1104,10 @@ func submitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
|
|||
}
|
||||
if tx.To() == nil {
|
||||
signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number())
|
||||
from, _ := types.Sender(signer, tx)
|
||||
from, err := types.Sender(signer, tx)
|
||||
if err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
addr := crypto.CreateAddress(from, tx.Nonce())
|
||||
log.Info("Submitted contract creation", "fullhash", tx.Hash().Hex(), "contract", addr.Hex())
|
||||
} else {
|
||||
|
@ -1179,29 +1191,12 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen
|
|||
|
||||
// SendRawTransaction will add the signed transaction to the transaction pool.
|
||||
// The sender is responsible for signing the transaction and using the correct nonce.
|
||||
func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (string, error) {
|
||||
func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) {
|
||||
tx := new(types.Transaction)
|
||||
if err := rlp.DecodeBytes(encodedTx, tx); err != nil {
|
||||
return "", err
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
||||
if err := s.b.SendTx(ctx, tx); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
signer := types.MakeSigner(s.b.ChainConfig(), s.b.CurrentBlock().Number())
|
||||
if tx.To() == nil {
|
||||
from, err := types.Sender(signer, tx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
addr := crypto.CreateAddress(from, tx.Nonce())
|
||||
log.Info("Submitted contract creation", "fullhash", tx.Hash().Hex(), "contract", addr.Hex())
|
||||
} else {
|
||||
log.Info("Submitted transaction", "fullhash", tx.Hash().Hex(), "recipient", tx.To())
|
||||
}
|
||||
|
||||
return tx.Hash().Hex(), nil
|
||||
return submitTransaction(ctx, s.b, tx)
|
||||
}
|
||||
|
||||
// Sign calculates an ECDSA signature for:
|
||||
|
@ -1325,7 +1320,7 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxAr
|
|||
return common.Hash{}, fmt.Errorf("Transaction %#x not found", matchTx.Hash())
|
||||
}
|
||||
|
||||
// PublicDebugAPI is the collection of Etheruem APIs exposed over the public
|
||||
// PublicDebugAPI is the collection of Ethereum APIs exposed over the public
|
||||
// debugging endpoint.
|
||||
type PublicDebugAPI struct {
|
||||
b Backend
|
||||
|
@ -1368,7 +1363,7 @@ func (api *PublicDebugAPI) SeedHash(ctx context.Context, number uint64) (string,
|
|||
return fmt.Sprintf("0x%x", ethash.SeedHash(number)), nil
|
||||
}
|
||||
|
||||
// PrivateDebugAPI is the collection of Etheruem APIs exposed over the private
|
||||
// PrivateDebugAPI is the collection of Ethereum APIs exposed over the private
|
||||
// debugging endpoint.
|
||||
type PrivateDebugAPI struct {
|
||||
b Backend
|
||||
|
|
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
|
@ -528,109 +528,6 @@ const Shh_JS = `
|
|||
web3._extend({
|
||||
property: 'shh',
|
||||
methods: [
|
||||
new web3._extend.Method({
|
||||
name: 'setMaxMessageLength',
|
||||
call: 'shh_setMaxMessageLength',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'setMinimumPoW',
|
||||
call: 'shh_setMinimumPoW',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'markTrustedPeer',
|
||||
call: 'shh_markTrustedPeer',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'hasKeyPair',
|
||||
call: 'shh_hasKeyPair',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'deleteKeyPair',
|
||||
call: 'shh_deleteKeyPair',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'newKeyPair',
|
||||
call: 'shh_newKeyPair'
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getPublicKey',
|
||||
call: 'shh_getPublicKey',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getPrivateKey',
|
||||
call: 'shh_getPrivateKey',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'newSymKey',
|
||||
call: 'shh_newSymKey',
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'addSymKey',
|
||||
call: 'shh_addSymKey',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'generateSymKeyFromPassword',
|
||||
call: 'shh_generateSymKeyFromPassword',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'hasSymKey',
|
||||
call: 'shh_hasSymKey',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getSymKey',
|
||||
call: 'shh_getSymKey',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'deleteSymKey',
|
||||
call: 'shh_deleteSymKey',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'subscribe',
|
||||
call: 'shh_subscribe',
|
||||
params: 2
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'unsubscribe',
|
||||
call: 'shh_unsubscribe',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'post',
|
||||
call: 'shh_post',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'publicKey',
|
||||
call: 'shh_getPublicKey',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getFilterMessages',
|
||||
call: 'shh_getFilterMessages',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'deleteMessageFilter',
|
||||
call: 'shh_deleteMessageFilter',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'newMessageFilter',
|
||||
call: 'shh_newMessageFilter',
|
||||
params: 1
|
||||
}),
|
||||
],
|
||||
properties:
|
||||
[
|
||||
|
|
|
@ -53,7 +53,19 @@ func (self *CpuAgent) Work() chan<- *Work { return self.workCh }
|
|||
func (self *CpuAgent) SetReturnCh(ch chan<- *Result) { self.returnCh = ch }
|
||||
|
||||
func (self *CpuAgent) Stop() {
|
||||
if !atomic.CompareAndSwapInt32(&self.isMining, 1, 0) {
|
||||
return // agent already stopped
|
||||
}
|
||||
self.stop <- struct{}{}
|
||||
done:
|
||||
// Empty work channel
|
||||
for {
|
||||
select {
|
||||
case <-self.workCh:
|
||||
default:
|
||||
break done
|
||||
}
|
||||
}
|
||||
}
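The labelled done: loop above is the usual non-blocking channel drain. A hedged, stand-alone version of the same pattern:

package main

import "fmt"

// drain empties whatever is already buffered in ch without ever blocking.
func drain(ch chan int) (n int) {
done:
	for {
		select {
		case <-ch:
			n++
		default:
			break done // nothing left: leave the loop
		}
	}
	return n
}

func main() {
	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	fmt.Println(drain(ch)) // 2
}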
|
||||
|
||||
func (self *CpuAgent) Start() {
|
||||
|
@ -85,17 +97,6 @@ out:
|
|||
break out
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
// Empty work channel
|
||||
for {
|
||||
select {
|
||||
case <-self.workCh:
|
||||
default:
|
||||
break done
|
||||
}
|
||||
}
|
||||
atomic.StoreInt32(&self.isMining, 0)
|
||||
}
|
||||
|
||||
func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
|
||||
|
|
|
@ -163,7 +163,7 @@ func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.
|
|||
}
|
||||
|
||||
// loop monitors mining events on the work and quit channels, updating the internal
|
||||
// state of the rmeote miner until a termination is requested.
|
||||
// state of the remote miner until a termination is requested.
|
||||
//
|
||||
// Note, the reason the work and quit channels are passed as parameters is because
|
||||
// RemoteAgent.Start() constantly recreates these channels, so the loop code cannot
|
||||
|
|
|
@ -165,7 +165,7 @@ func (c *BoundContract) Call(opts *CallOpts, out *Interfaces, method string, arg
|
|||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (c *BoundContract) Transact(opts *TransactOpts, method string, args *Interfaces) (tx *Transaction, _ error) {
|
||||
rawTx, err := c.contract.Transact(&opts.opts, method, args.objects)
|
||||
rawTx, err := c.contract.Transact(&opts.opts, method, args.objects...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -77,6 +77,13 @@ func (ec *EthereumClient) GetTransactionByHash(ctx *Context, hash *Hash) (tx *Tr
|
|||
return &Transaction{rawTx}, err
|
||||
}
|
||||
|
||||
// GetTransactionSender returns the sender address of a transaction. The transaction must
|
||||
// be included in blockchain at the given block and index.
|
||||
func (ec *EthereumClient) GetTransactionSender(ctx *Context, tx *Transaction, blockhash *Hash, index int) (sender *Address, _ error) {
|
||||
addr, err := ec.client.TransactionSender(ctx.context, tx.tx, blockhash.hash, uint(index))
|
||||
return &Address{addr}, err
|
||||
}
|
||||
|
||||
// GetTransactionCount returns the total number of transactions in the given block.
|
||||
func (ec *EthereumClient) GetTransactionCount(ctx *Context, hash *Hash) (count int, _ error) {
|
||||
rawCount, err := ec.client.TransactionCount(ctx.context, hash.hash)
|
||||
|
|
|
@ -261,10 +261,13 @@ func (tx *Transaction) GetGasPrice() *BigInt { return &BigInt{tx.tx.GasPrice()}
|
|||
func (tx *Transaction) GetValue() *BigInt { return &BigInt{tx.tx.Value()} }
|
||||
func (tx *Transaction) GetNonce() int64 { return int64(tx.tx.Nonce()) }
|
||||
|
||||
func (tx *Transaction) GetHash() *Hash { return &Hash{tx.tx.Hash()} }
|
||||
func (tx *Transaction) GetSigHash() *Hash { return &Hash{tx.tx.SigHash(types.HomesteadSigner{})} }
|
||||
func (tx *Transaction) GetCost() *BigInt { return &BigInt{tx.tx.Cost()} }
|
||||
func (tx *Transaction) GetHash() *Hash { return &Hash{tx.tx.Hash()} }
|
||||
func (tx *Transaction) GetCost() *BigInt { return &BigInt{tx.tx.Cost()} }
|
||||
|
||||
// Deprecated: GetSigHash cannot know which signer to use.
|
||||
func (tx *Transaction) GetSigHash() *Hash { return &Hash{types.HomesteadSigner{}.Hash(tx.tx)} }
|
||||
|
||||
// Deprecated: use EthereumClient.TransactionSender
|
||||
func (tx *Transaction) GetFrom(chainID *BigInt) (address *Address, _ error) {
|
||||
var signer types.Signer = types.HomesteadSigner{}
|
||||
if chainID != nil {
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -25,6 +26,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
|
@ -73,6 +75,44 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
|
|||
return true, nil
|
||||
}
|
||||
|
||||
// PeerEvents creates an RPC subscription which receives peer events from the
|
||||
// node's p2p.Server
|
||||
func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription, error) {
|
||||
// Make sure the server is running, fail otherwise
|
||||
server := api.node.Server()
|
||||
if server == nil {
|
||||
return nil, ErrNodeStopped
|
||||
}
|
||||
|
||||
// Create the subscription
|
||||
notifier, supported := rpc.NotifierFromContext(ctx)
|
||||
if !supported {
|
||||
return nil, rpc.ErrNotificationsUnsupported
|
||||
}
|
||||
rpcSub := notifier.CreateSubscription()
|
||||
|
||||
go func() {
|
||||
events := make(chan *p2p.PeerEvent)
|
||||
sub := server.SubscribeEvents(events)
|
||||
defer sub.Unsubscribe()
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-events:
|
||||
notifier.Notify(rpcSub.ID, event)
|
||||
case <-sub.Err():
|
||||
return
|
||||
case <-rpcSub.Err():
|
||||
return
|
||||
case <-notifier.Closed():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return rpcSub, nil
|
||||
}
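A hedged sketch of the mechanism PeerEvents exposes over RPC: p2p.Server.SubscribeEvents feeds connect/drop/message events into a channel. The key, peer limit and listen address below are illustrative only.

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
)

func main() {
	key, _ := crypto.GenerateKey()
	srv := &p2p.Server{Config: p2p.Config{
		PrivateKey: key,
		MaxPeers:   10,
		ListenAddr: ":0", // pick any free port
	}}
	if err := srv.Start(); err != nil {
		panic(err)
	}
	defer srv.Stop()

	events := make(chan *p2p.PeerEvent)
	sub := srv.SubscribeEvents(events)
	defer sub.Unsubscribe()

	select {
	case ev := <-events:
		fmt.Println("peer event:", ev.Type, ev.Peer)
	case err := <-sub.Err():
		fmt.Println("subscription error:", err)
	case <-time.After(10 * time.Second):
		fmt.Println("no peer events yet")
	}
}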
|
||||
|
||||
// StartRPC starts the HTTP RPC API server.
|
||||
func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string) (bool, error) {
|
||||
api.node.lock.Lock()
|
||||
|
@ -163,7 +203,7 @@ func (api *PrivateAdminAPI) StartWS(host *string, port *int, allowedOrigins *str
|
|||
}
|
||||
}
|
||||
|
||||
if err := api.node.startWS(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, origins); err != nil {
|
||||
if err := api.node.startWS(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, origins, api.node.config.WSExposeAll); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
|
|
|
@ -128,6 +128,13 @@ type Config struct {
|
|||
// If the module list is empty, all RPC API endpoints designated public will be
|
||||
// exposed.
|
||||
WSModules []string `toml:",omitempty"`
|
||||
|
||||
// WSExposeAll exposes all API modules via the WebSocket RPC interface rather
|
||||
// than just the public ones.
|
||||
//
|
||||
// *WARNING* Only set this if the node is running in a trusted network, exposing
|
||||
// private APIs to untrusted users is a major security risk.
|
||||
WSExposeAll bool `toml:",omitempty"`
|
||||
}
|
||||
|
||||
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
|
||||
|
|
|
@ -261,7 +261,7 @@ func (n *Node) startRPC(services map[reflect.Type]Service) error {
|
|||
n.stopInProc()
|
||||
return err
|
||||
}
|
||||
if err := n.startWS(n.wsEndpoint, apis, n.config.WSModules, n.config.WSOrigins); err != nil {
|
||||
if err := n.startWS(n.wsEndpoint, apis, n.config.WSModules, n.config.WSOrigins, n.config.WSExposeAll); err != nil {
|
||||
n.stopHTTP()
|
||||
n.stopIPC()
|
||||
n.stopInProc()
|
||||
|
@ -412,7 +412,7 @@ func (n *Node) stopHTTP() {
|
|||
}
|
||||
|
||||
// startWS initializes and starts the websocket RPC endpoint.
|
||||
func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrigins []string) error {
|
||||
func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrigins []string, exposeAll bool) error {
|
||||
// Short circuit if the WS endpoint isn't being exposed
|
||||
if endpoint == "" {
|
||||
return nil
|
||||
|
@ -425,7 +425,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
|
|||
// Register all the APIs exposed by the services
|
||||
handler := rpc.NewServer()
|
||||
for _, api := range apis {
|
||||
if whitelist[api.Namespace] || (len(whitelist) == 0 && api.Public) {
|
||||
if exposeAll || whitelist[api.Namespace] || (len(whitelist) == 0 && api.Public) {
|
||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -441,7 +441,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
|
|||
return err
|
||||
}
|
||||
go rpc.NewWSServer(wsOrigins, handler).Serve(listener)
|
||||
log.Info(fmt.Sprintf("WebSocket endpoint opened: ws://%s", endpoint))
|
||||
log.Info(fmt.Sprintf("WebSocket endpoint opened: ws://%s", listener.Addr()))
|
||||
|
||||
// All listeners booted successfully
|
||||
n.wsEndpoint = endpoint
|
||||
|
@ -556,6 +556,17 @@ func (n *Node) Attach() (*rpc.Client, error) {
|
|||
return rpc.DialInProc(n.inprocHandler), nil
|
||||
}
|
||||
|
||||
// RPCHandler returns the in-process RPC request handler.
|
||||
func (n *Node) RPCHandler() (*rpc.Server, error) {
|
||||
n.lock.RLock()
|
||||
defer n.lock.RUnlock()
|
||||
|
||||
if n.inprocHandler == nil {
|
||||
return nil, ErrNodeStopped
|
||||
}
|
||||
return n.inprocHandler, nil
|
||||
}
|
||||
|
||||
// Server retrieves the currently running P2P network layer. This method is meant
|
||||
// only to inspect fields of the currently running server, life cycle management
|
||||
// should be left to this Node entity.
|
||||
|
|
|
@ -47,6 +47,24 @@ const (
|
|||
maxResolveDelay = time.Hour
|
||||
)
|
||||
|
||||
// NodeDialer is used to connect to nodes in the network, typically by using
|
||||
// an underlying net.Dialer but also using net.Pipe in tests
|
||||
type NodeDialer interface {
|
||||
Dial(*discover.Node) (net.Conn, error)
|
||||
}
|
||||
|
||||
// TCPDialer implements the NodeDialer interface by using a net.Dialer to
|
||||
// create TCP connections to nodes in the network
|
||||
type TCPDialer struct {
|
||||
*net.Dialer
|
||||
}
|
||||
|
||||
// Dial creates a TCP connection to the node
|
||||
func (t TCPDialer) Dial(dest *discover.Node) (net.Conn, error) {
|
||||
addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)}
|
||||
return t.Dialer.Dial("tcp", addr.String())
|
||||
}
|
||||
|
||||
// dialstate schedules dials and discovery lookups.
|
||||
// it gets a chance to compute new tasks on every iteration
|
||||
// of the main loop in Server.run.
|
||||
|
@ -318,14 +336,13 @@ func (t *dialTask) resolve(srv *Server) bool {
|
|||
|
||||
// dial performs the actual connection attempt.
|
||||
func (t *dialTask) dial(srv *Server, dest *discover.Node) bool {
|
||||
addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)}
|
||||
fd, err := srv.Dialer.Dial("tcp", addr.String())
|
||||
fd, err := srv.Dialer.Dial(dest)
|
||||
if err != nil {
|
||||
log.Trace("Dial error", "task", t, "err", err)
|
||||
return false
|
||||
}
|
||||
mfd := newMeteredConn(fd, false)
|
||||
srv.setupConn(mfd, t.flags, dest)
|
||||
srv.SetupConn(mfd, t.flags, dest)
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
|
@ -225,6 +225,11 @@ func (n *Node) UnmarshalText(text []byte) error {
|
|||
// The node identifier is a marshaled elliptic curve public key.
|
||||
type NodeID [NodeIDBits / 8]byte
|
||||
|
||||
// Bytes returns a byte slice representation of the NodeID
|
||||
func (n NodeID) Bytes() []byte {
|
||||
return n[:]
|
||||
}
|
||||
|
||||
// NodeID prints as a long hexadecimal number.
|
||||
func (n NodeID) String() string {
|
||||
return fmt.Sprintf("%x", n[:])
|
||||
|
@ -240,6 +245,41 @@ func (n NodeID) TerminalString() string {
|
|||
return hex.EncodeToString(n[:8])
|
||||
}
|
||||
|
||||
// MarshalText implements the encoding.TextMarshaler interface.
|
||||
func (n NodeID) MarshalText() ([]byte, error) {
|
||||
return []byte(hex.EncodeToString(n[:])), nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements the encoding.TextUnmarshaler interface.
|
||||
func (n *NodeID) UnmarshalText(text []byte) error {
|
||||
id, err := HexID(string(text))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*n = id
|
||||
return nil
|
||||
}
|
||||
|
||||
// BytesID converts a byte slice to a NodeID
|
||||
func BytesID(b []byte) (NodeID, error) {
|
||||
var id NodeID
|
||||
if len(b) != len(id) {
|
||||
return id, fmt.Errorf("wrong length, want %d bytes", len(id))
|
||||
}
|
||||
copy(id[:], b)
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// MustBytesID converts a byte slice to a NodeID.
|
||||
// It panics if the byte slice is not a valid NodeID.
|
||||
func MustBytesID(b []byte) NodeID {
|
||||
id, err := BytesID(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// HexID converts a hex string to a NodeID.
|
||||
// The string may be prefixed with 0x.
|
||||
func HexID(in string) (NodeID, error) {
|
||||
|
|
|
@ -27,6 +27,8 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
|
@ -271,3 +273,67 @@ func ExpectMsg(r MsgReader, code uint64, content interface{}) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// msgEventer wraps a MsgReadWriter and sends events whenever a message is sent
|
||||
// or received
|
||||
type msgEventer struct {
|
||||
MsgReadWriter
|
||||
|
||||
feed *event.Feed
|
||||
peerID discover.NodeID
|
||||
Protocol string
|
||||
}
|
||||
|
||||
// newMsgEventer returns a msgEventer which sends message events to the given
|
||||
// feed
|
||||
func newMsgEventer(rw MsgReadWriter, feed *event.Feed, peerID discover.NodeID, proto string) *msgEventer {
|
||||
return &msgEventer{
|
||||
MsgReadWriter: rw,
|
||||
feed: feed,
|
||||
peerID: peerID,
|
||||
Protocol: proto,
|
||||
}
|
||||
}
|
||||
|
||||
// ReadMsg reads a message from the underlying MsgReadWriter and emits a
|
||||
// "message received" event
|
||||
func (self *msgEventer) ReadMsg() (Msg, error) {
|
||||
msg, err := self.MsgReadWriter.ReadMsg()
|
||||
if err != nil {
|
||||
return msg, err
|
||||
}
|
||||
self.feed.Send(&PeerEvent{
|
||||
Type: PeerEventTypeMsgRecv,
|
||||
Peer: self.peerID,
|
||||
Protocol: self.Protocol,
|
||||
MsgCode: &msg.Code,
|
||||
MsgSize: &msg.Size,
|
||||
})
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
// WriteMsg writes a message to the underlying MsgReadWriter and emits a
|
||||
// "message sent" event
|
||||
func (self *msgEventer) WriteMsg(msg Msg) error {
|
||||
err := self.MsgReadWriter.WriteMsg(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.feed.Send(&PeerEvent{
|
||||
Type: PeerEventTypeMsgSend,
|
||||
Peer: self.peerID,
|
||||
Protocol: self.Protocol,
|
||||
MsgCode: &msg.Code,
|
||||
MsgSize: &msg.Size,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the underlying MsgReadWriter if it implements the io.Closer
|
||||
// interface
|
||||
func (self *msgEventer) Close() error {
|
||||
if v, ok := self.MsgReadWriter.(io.Closer); ok {
|
||||
return v.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -62,6 +62,7 @@ func (n *upnp) AddMapping(protocol string, extport, intport int, desc string, li
|
|||
}
|
||||
protocol = strings.ToUpper(protocol)
|
||||
lifetimeS := uint32(lifetime / time.Second)
|
||||
n.DeleteMapping(protocol, extport, intport)
|
||||
return n.client.AddPortMapping("", uint16(extport), protocol, uint16(intport), ip.String(), true, desc, lifetimeS)
|
||||
}
|
||||
|
||||
|
|
|
@ -25,16 +25,19 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/mclock"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
const (
|
||||
baseProtocolVersion = 4
|
||||
baseProtocolVersion = 5
|
||||
baseProtocolLength = uint64(16)
|
||||
baseProtocolMaxMsgSize = 2 * 1024
|
||||
|
||||
snappyProtocolVersion = 5
|
||||
|
||||
pingInterval = 15 * time.Second
|
||||
)
|
||||
|
||||
|
@ -60,6 +63,38 @@ type protoHandshake struct {
|
|||
Rest []rlp.RawValue `rlp:"tail"`
|
||||
}
|
||||
|
||||
// PeerEventType is the type of peer events emitted by a p2p.Server
|
||||
type PeerEventType string
|
||||
|
||||
const (
|
||||
// PeerEventTypeAdd is the type of event emitted when a peer is added
|
||||
// to a p2p.Server
|
||||
PeerEventTypeAdd PeerEventType = "add"
|
||||
|
||||
// PeerEventTypeDrop is the type of event emitted when a peer is
|
||||
// dropped from a p2p.Server
|
||||
PeerEventTypeDrop PeerEventType = "drop"
|
||||
|
||||
// PeerEventTypeMsgSend is the type of event emitted when a
|
||||
// message is successfully sent to a peer
|
||||
PeerEventTypeMsgSend PeerEventType = "msgsend"
|
||||
|
||||
// PeerEventTypeMsgRecv is the type of event emitted when a
|
||||
// message is received from a peer
|
||||
PeerEventTypeMsgRecv PeerEventType = "msgrecv"
|
||||
)
|
||||
|
||||
// PeerEvent is an event emitted when peers are either added or dropped from
|
||||
// a p2p.Server or when a message is sent or received on a peer connection
|
||||
type PeerEvent struct {
|
||||
Type PeerEventType `json:"type"`
|
||||
Peer discover.NodeID `json:"peer"`
|
||||
Error string `json:"error,omitempty"`
|
||||
Protocol string `json:"protocol,omitempty"`
|
||||
MsgCode *uint64 `json:"msg_code,omitempty"`
|
||||
MsgSize *uint32 `json:"msg_size,omitempty"`
|
||||
}
|
||||
|
||||
// Peer represents a connected remote node.
|
||||
type Peer struct {
|
||||
rw *conn
|
||||
|
@ -71,6 +106,9 @@ type Peer struct {
|
|||
protoErr chan error
|
||||
closed chan struct{}
|
||||
disc chan DiscReason
|
||||
|
||||
// events receives message send / receive events if set
|
||||
events *event.Feed
|
||||
}
|
||||
|
||||
// NewPeer returns a peer for testing purposes.
|
||||
|
@ -297,9 +335,13 @@ func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error)
|
|||
proto.closed = p.closed
|
||||
proto.wstart = writeStart
|
||||
proto.werr = writeErr
|
||||
var rw MsgReadWriter = proto
|
||||
if p.events != nil {
|
||||
rw = newMsgEventer(rw, p.events, p.ID(), proto.Name)
|
||||
}
|
||||
p.log.Trace(fmt.Sprintf("Starting protocol %s/%d", proto.Name, proto.Version))
|
||||
go func() {
|
||||
err := proto.Run(p, proto)
|
||||
err := proto.Run(p, rw)
|
||||
if err == nil {
|
||||
p.log.Trace(fmt.Sprintf("Protocol %s/%d returned", proto.Name, proto.Version))
|
||||
err = errProtocolReturned
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
mrand "math/rand"
|
||||
"net"
|
||||
"sync"
|
||||
|
@ -40,6 +41,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/golang/snappy"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -68,6 +70,10 @@ const (
|
|||
discWriteTimeout = 1 * time.Second
|
||||
)
|
||||
|
||||
// errPlainMessageTooLarge is returned if a decompressed message length exceeds
|
||||
// the allowed 24 bits (i.e. length >= 16MB).
|
||||
var errPlainMessageTooLarge = errors.New("message length >= 16MB")
|
||||
|
||||
// rlpx is the transport protocol used by actual (non-test) connections.
|
||||
// It wraps the frame encoder with locks and read/write deadlines.
|
||||
type rlpx struct {
|
||||
|
@ -127,6 +133,9 @@ func (t *rlpx) doProtoHandshake(our *protoHandshake) (their *protoHandshake, err
|
|||
if err := <-werr; err != nil {
|
||||
return nil, fmt.Errorf("write error: %v", err)
|
||||
}
|
||||
// If the protocol version supports Snappy encoding, upgrade immediately
|
||||
t.rw.snappy = their.Version >= snappyProtocolVersion
|
||||
|
||||
return their, nil
|
||||
}
|
||||
|
||||
|
@ -556,6 +565,8 @@ type rlpxFrameRW struct {
|
|||
macCipher cipher.Block
|
||||
egressMAC hash.Hash
|
||||
ingressMAC hash.Hash
|
||||
|
||||
snappy bool
|
||||
}
|
||||
|
||||
func newRLPXFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
|
||||
|
@ -583,6 +594,17 @@ func newRLPXFrameRW(conn io.ReadWriter, s secrets) *rlpxFrameRW {
|
|||
func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
|
||||
ptype, _ := rlp.EncodeToBytes(msg.Code)
|
||||
|
||||
// if snappy is enabled, compress message now
|
||||
if rw.snappy {
|
||||
if msg.Size > maxUint24 {
|
||||
return errPlainMessageTooLarge
|
||||
}
|
||||
payload, _ := ioutil.ReadAll(msg.Payload)
|
||||
payload = snappy.Encode(nil, payload)
|
||||
|
||||
msg.Payload = bytes.NewReader(payload)
|
||||
msg.Size = uint32(len(payload))
|
||||
}
|
||||
// write header
|
||||
headbuf := make([]byte, 32)
|
||||
fsize := uint32(len(ptype)) + msg.Size
|
||||
|
@ -668,6 +690,26 @@ func (rw *rlpxFrameRW) ReadMsg() (msg Msg, err error) {
|
|||
}
|
||||
msg.Size = uint32(content.Len())
|
||||
msg.Payload = content
|
||||
|
||||
// if snappy is enabled, verify and decompress message
|
||||
if rw.snappy {
|
||||
payload, err := ioutil.ReadAll(msg.Payload)
|
||||
if err != nil {
|
||||
return msg, err
|
||||
}
|
||||
size, err := snappy.DecodedLen(payload)
|
||||
if err != nil {
|
||||
return msg, err
|
||||
}
|
||||
if size > int(maxUint24) {
|
||||
return msg, errPlainMessageTooLarge
|
||||
}
|
||||
payload, err = snappy.Decode(nil, payload)
|
||||
if err != nil {
|
||||
return msg, err
|
||||
}
|
||||
msg.Size, msg.Payload = uint32(size), bytes.NewReader(payload)
|
||||
}
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/mclock"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||
|
@ -130,10 +131,14 @@ type Config struct {
|
|||
|
||||
// If Dialer is set to a non-nil value, the given Dialer
|
||||
// is used to dial outbound peer connections.
|
||||
Dialer *net.Dialer `toml:"-"`
|
||||
Dialer NodeDialer `toml:"-"`
|
||||
|
||||
// If NoDial is true, the server will not dial any peers.
|
||||
NoDial bool `toml:",omitempty"`
|
||||
|
||||
// If EnableMsgEvents is set then the server will emit PeerEvents
|
||||
// whenever a message is sent to or received from a peer
|
||||
EnableMsgEvents bool
|
||||
}
|
||||
|
||||
// Server manages all peer connections.
|
||||
|
@ -166,6 +171,7 @@ type Server struct {
|
|||
addpeer chan *conn
|
||||
delpeer chan peerDrop
|
||||
loopWG sync.WaitGroup // loop, listenLoop
|
||||
peerFeed event.Feed
|
||||
}
|
||||
|
||||
type peerOpFunc func(map[discover.NodeID]*Peer)
|
||||
|
@ -191,7 +197,7 @@ type conn struct {
|
|||
fd net.Conn
|
||||
transport
|
||||
flags connFlag
|
||||
cont chan error // The run loop uses cont to signal errors to setupConn.
|
||||
cont chan error // The run loop uses cont to signal errors to SetupConn.
|
||||
id discover.NodeID // valid after the encryption handshake
|
||||
caps []Cap // valid after the protocol handshake
|
||||
name string // valid after the protocol handshake
|
||||
|
@ -291,6 +297,11 @@ func (srv *Server) RemovePeer(node *discover.Node) {
|
|||
}
|
||||
}
|
||||
|
||||
// SubscribeEvents subscribes the given channel to peer events
|
||||
func (srv *Server) SubscribeEvents(ch chan *PeerEvent) event.Subscription {
|
||||
return srv.peerFeed.Subscribe(ch)
|
||||
}
|
||||
|
||||
// Self returns the local node's endpoint information.
|
||||
func (srv *Server) Self() *discover.Node {
|
||||
srv.lock.Lock()
|
||||
|
@ -358,7 +369,7 @@ func (srv *Server) Start() (err error) {
|
|||
srv.newTransport = newRLPX
|
||||
}
|
||||
if srv.Dialer == nil {
|
||||
srv.Dialer = &net.Dialer{Timeout: defaultDialTimeout}
|
||||
srv.Dialer = TCPDialer{&net.Dialer{Timeout: defaultDialTimeout}}
|
||||
}
|
||||
srv.quit = make(chan struct{})
|
||||
srv.addpeer = make(chan *conn)
|
||||
|
@ -536,7 +547,11 @@ running:
|
|||
c.flags |= trustedConn
|
||||
}
|
||||
// TODO: track in-progress inbound node IDs (pre-Peer) to avoid dialing them.
|
||||
c.cont <- srv.encHandshakeChecks(peers, c)
|
||||
select {
|
||||
case c.cont <- srv.encHandshakeChecks(peers, c):
|
||||
case <-srv.quit:
|
||||
break running
|
||||
}
|
||||
case c := <-srv.addpeer:
|
||||
// At this point the connection is past the protocol handshake.
|
||||
// Its capabilities are known and the remote identity is verified.
|
||||
|
@ -544,6 +559,11 @@ running:
|
|||
if err == nil {
|
||||
// The handshakes are done and it passed all checks.
|
||||
p := newPeer(c, srv.Protocols)
|
||||
// If message events are enabled, pass the peerFeed
|
||||
// to the peer
|
||||
if srv.EnableMsgEvents {
|
||||
p.events = &srv.peerFeed
|
||||
}
|
||||
name := truncateName(c.name)
|
||||
log.Debug("Adding p2p peer", "id", c.id, "name", name, "addr", c.fd.RemoteAddr(), "peers", len(peers)+1)
|
||||
peers[c.id] = p
|
||||
|
@ -552,7 +572,11 @@ running:
|
|||
// The dialer logic relies on the assumption that
|
||||
// dial tasks complete after the peer has been added or
|
||||
// discarded. Unblock the task last.
|
||||
c.cont <- err
|
||||
select {
|
||||
case c.cont <- err:
|
||||
case <-srv.quit:
|
||||
break running
|
||||
}
|
||||
case pd := <-srv.delpeer:
|
||||
// A peer disconnected.
|
||||
d := common.PrettyDuration(mclock.Now() - pd.created)
|
||||
|
@ -665,16 +689,16 @@ func (srv *Server) listenLoop() {
|
|||
// Spawn the handler. It will give the slot back when the connection
|
||||
// has been established.
|
||||
go func() {
|
||||
srv.setupConn(fd, inboundConn, nil)
|
||||
srv.SetupConn(fd, inboundConn, nil)
|
||||
slots <- struct{}{}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// setupConn runs the handshakes and attempts to add the connection
|
||||
// SetupConn runs the handshakes and attempts to add the connection
|
||||
// as a peer. It returns when the connection has been added as a peer
|
||||
// or the handshakes have failed.
|
||||
func (srv *Server) setupConn(fd net.Conn, flags connFlag, dialDest *discover.Node) {
|
||||
func (srv *Server) SetupConn(fd net.Conn, flags connFlag, dialDest *discover.Node) {
|
||||
// Prevent leftover pending conns from entering the handshake.
|
||||
srv.lock.Lock()
|
||||
running := srv.running
|
||||
|
@ -755,7 +779,23 @@ func (srv *Server) runPeer(p *Peer) {
|
|||
if srv.newPeerHook != nil {
|
||||
srv.newPeerHook(p)
|
||||
}
|
||||
|
||||
// broadcast peer add
|
||||
srv.peerFeed.Send(&PeerEvent{
|
||||
Type: PeerEventTypeAdd,
|
||||
Peer: p.ID(),
|
||||
})
|
||||
|
||||
// run the protocol
|
||||
remoteRequested, err := p.run()
|
||||
|
||||
// broadcast peer drop
|
||||
srv.peerFeed.Send(&PeerEvent{
|
||||
Type: PeerEventTypeDrop,
|
||||
Peer: p.ID(),
|
||||
Error: err.Error(),
|
||||
})
|
||||
|
||||
// Note: run waits for existing peers to be sent on srv.delpeer
|
||||
// before returning, so this send should not select on srv.quit.
|
||||
srv.delpeer <- peerDrop{p, err, remoteRequested}
|
||||
|
|
|
@ -0,0 +1,181 @@
|
|||
# devp2p Simulations
|
||||
|
||||
The `p2p/simulations` package implements a simulation framework which supports
|
||||
creating a collection of devp2p nodes, connecting them together to form a
|
||||
simulation network, performing simulation actions in that network and then
|
||||
extracting useful information.
|
||||
|
||||
## Nodes
|
||||
|
||||
Each node in a simulation network runs multiple services by wrapping a collection
|
||||
of objects which implement the `node.Service` interface, meaning they:
|
||||
|
||||
* can be started and stopped
|
||||
* run p2p protocols
|
||||
* expose RPC APIs
|
||||
|
||||
This means that any object which implements the `node.Service` interface can be
|
||||
used to run a node in the simulation.
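For reference, a minimal service satisfying this interface might look like the
following sketch; the `pingService` type, its package name and its no-op method
bodies are purely illustrative and not part of the package:

```go
package ping

import (
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

// pingService is an illustrative no-op service used only for this sketch.
type pingService struct{}

// Protocols returns the devp2p protocols the service runs (none here).
func (s *pingService) Protocols() []p2p.Protocol { return nil }

// APIs returns the RPC APIs the service exposes (none here).
func (s *pingService) APIs() []rpc.API { return nil }

// Start is called once the node's p2p server is running.
func (s *pingService) Start(srv *p2p.Server) error { return nil }

// Stop terminates the service.
func (s *pingService) Stop() error { return nil }

// Compile-time check that pingService satisfies node.Service.
var _ node.Service = (*pingService)(nil)
```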
|
||||
|
||||
## Services
|
||||
|
||||
Before running a simulation, a set of service initializers must be registered
|
||||
which can then be used to run nodes in the network.
|
||||
|
||||
A service initializer is a function with the following signature:
|
||||
|
||||
```go
|
||||
func(ctx *adapters.ServiceContext) (node.Service, error)
|
||||
```
|
||||
|
||||
These initializers should be registered by calling the `adapters.RegisterServices`
|
||||
function in an `init()` hook:
|
||||
|
||||
```go
|
||||
func init() {
|
||||
adapters.RegisterServices(adapters.Services{
|
||||
"service1": initService1,
|
||||
"service2": initService2,
|
||||
})
|
||||
}
|
||||
```
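As a hedged illustration, one of the initializers referenced above could be as
simple as the following (`initService1` is a made-up name, and the returned
`pingService` is the illustrative service from the earlier sketch):

```go
// initService1 is a hypothetical initializer matching the registration above.
func initService1(ctx *adapters.ServiceContext) (node.Service, error) {
	// A real initializer would typically read ctx.Config and, if present,
	// ctx.Snapshot to restore previous state before returning the service.
	return &pingService{}, nil
}
```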
|
||||
|
||||
## Node Adapters
|
||||
|
||||
The simulation framework includes multiple "node adapters" which are
|
||||
responsible for creating an environment in which a node runs.
|
||||
|
||||
### SimAdapter
|
||||
|
||||
The `SimAdapter` runs nodes in-memory, connecting them using an in-memory,
|
||||
synchronous `net.Pipe` and connecting to their RPC server using an in-memory
|
||||
`rpc.Client`.
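A minimal sketch of constructing one, reusing the hypothetical constructor from
above (`NewSimAdapter` takes a map of service constructors keyed by service
name):

```go
// buildSimAdapter wires the hypothetical initService1 constructor into an
// in-memory adapter; the key must match the name used when registering.
func buildSimAdapter() *adapters.SimAdapter {
	return adapters.NewSimAdapter(map[string]adapters.ServiceFunc{
		"service1": initService1,
	})
}
```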
|
||||
|
||||
### ExecAdapter
|
||||
|
||||
The `ExecAdapter` runs nodes as child processes of the running simulation.
|
||||
|
||||
It does this by executing the binary which is running the simulation but
|
||||
setting `argv[0]` (i.e. the program name) to `p2p-node`. This is then
|
||||
detected by an init hook in the child process, which runs the `node.Service`
|
||||
using the devp2p node stack rather than executing `main()`.
|
||||
|
||||
The nodes listen for devp2p connections and WebSocket RPC clients on random
|
||||
localhost ports.
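The underlying re-exec pattern, sketched below in isolation, relies on
`github.com/docker/docker/pkg/reexec` (which the adapter itself uses); the
handler body here is illustrative, the real one being `execP2PNode`:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// When the binary is re-executed with argv[0] == "p2p-node", run the
	// registered handler instead of main().
	reexec.Register("p2p-node", func() {
		fmt.Println("running as a simulation node")
	})
}

func main() {
	// reexec.Init runs the registered handler if argv[0] matches one and
	// reports whether it did, in which case main() should return early.
	if reexec.Init() {
		return
	}
	fmt.Println("running the simulation itself")
}
```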
|
||||
|
||||
### DockerAdapter
|
||||
|
||||
The `DockerAdapter` is similar to the `ExecAdapter` but executes `docker run`
|
||||
to run the node in a Docker container using a Docker image containing the
|
||||
simulation binary at `/bin/p2p-node`.
|
||||
|
||||
The Docker image is built using `docker build` when the adapter is initialised,
|
||||
meaning no prior setup is necessary other than having a working Docker client.
|
||||
|
||||
Each node listens on the external IP of the container and the default p2p and
|
||||
RPC ports (`30303` and `8546` respectively).
|
||||
|
||||
## Network
|
||||
|
||||
A simulation network is created with an ID and default service (which is used
|
||||
if a node is created without an explicit service), exposes methods for
|
||||
creating, starting, stopping, connecting and disconnecting nodes, and emits
|
||||
events when certain actions occur.
|
||||
|
||||
### Events
|
||||
|
||||
A simulation network emits the following events:
|
||||
|
||||
* node event - when nodes are created / started / stopped
|
||||
* connection event - when nodes are connected / disconnected
|
||||
* message event - when a protocol message is sent between two nodes
|
||||
|
||||
The events have a "control" flag which, when set, indicates that the event is the
|
||||
outcome of a controlled simulation action (e.g. creating a node or explicitly
|
||||
connecting two nodes together).
|
||||
|
||||
This is in contrast to a non-control event, otherwise called a "live" event,
|
||||
which is the outcome of something happening in the network as a result of a
|
||||
control event (e.g. a node actually started up or a connection was actually
|
||||
established between two nodes).
|
||||
|
||||
Live events are detected by the simulation network by subscribing to node peer
|
||||
events via RPC when the nodes start up.
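Inside a node this boils down to `p2p.Server.SubscribeEvents`; a rough sketch,
assuming `srv` is a running `*p2p.Server` and the loop runs in its own
goroutine:

```go
events := make(chan *p2p.PeerEvent)
sub := srv.SubscribeEvents(events)
defer sub.Unsubscribe()

for {
	select {
	case ev := <-events:
		// ev.Type is one of "add", "drop", "msgsend" or "msgrecv".
		log.Info("peer event", "type", ev.Type, "peer", ev.Peer)
	case err := <-sub.Err():
		log.Error("event subscription closed", "err", err)
		return
	}
}
```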
|
||||
|
||||
## Testing Framework
|
||||
|
||||
The `Simulation` type can be used in tests to perform actions in a simulation
|
||||
network and then wait for expectations to be met.
|
||||
|
||||
With a running simulation network, the `Simulation.Run` method can be called
|
||||
with a `Step` which has the following fields:
|
||||
|
||||
* `Action` - a function which performs some action in the network
|
||||
|
||||
* `Expect` - an expectation function which returns whether or not a
|
||||
given node meets the expectation
|
||||
|
||||
* `Trigger` - a channel which receives node IDs; each received ID triggers a check
|
||||
of the expectation function against that node
|
||||
|
||||
As a concrete example, consider a simulated network of Ethereum nodes. An
|
||||
`Action` could be the sending of a transaction, `Expect` it being included in
|
||||
a block, and `Trigger` a check for every block that is mined.
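A hedged sketch of such a step is shown below; the exact field shapes are
assumptions based on the description above rather than the package's
authoritative definitions, and `sendTransaction`, `txIncludedInBlock`, `nodeIDs`
and `sim` are hypothetical:

```go
trigger := make(chan discover.NodeID)
step := &simulations.Step{
	// Action performs the simulation action, e.g. sending a transaction.
	Action: func(ctx context.Context) error {
		return sendTransaction(ctx)
	},
	// Trigger receives node IDs whenever the expectation should be re-checked.
	Trigger: trigger,
	// Expect checks whether a given node meets the expectation.
	Expect: &simulations.Expectation{
		Nodes: nodeIDs,
		Check: func(ctx context.Context, id discover.NodeID) (bool, error) {
			return txIncludedInBlock(ctx, id)
		},
	},
}
result := sim.Run(ctx, step)
_ = result
```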
|
||||
|
||||
On return, the `Simulation.Run` method returns a `StepResult` which can be used
|
||||
to determine if all nodes met the expectation, how long it took them to meet
|
||||
the expectation and what network events were emitted during the step run.
|
||||
|
||||
## HTTP API
|
||||
|
||||
The simulation framework includes an HTTP API which can be used to control the
|
||||
simulation.
|
||||
|
||||
The API is initialised with a particular node adapter and has the following
|
||||
endpoints:
|
||||
|
||||
```
|
||||
GET / Get network information
|
||||
POST /start Start all nodes in the network
|
||||
POST /stop Stop all nodes in the network
|
||||
GET /events Stream network events
|
||||
GET /snapshot Take a network snapshot
|
||||
POST /snapshot Load a network snapshot
|
||||
POST /nodes Create a node
|
||||
GET /nodes Get all nodes in the network
|
||||
GET /nodes/:nodeid Get node information
|
||||
POST /nodes/:nodeid/start Start a node
|
||||
POST /nodes/:nodeid/stop Stop a node
|
||||
POST /nodes/:nodeid/conn/:peerid Connect two nodes
|
||||
DELETE /nodes/:nodeid/conn/:peerid Disconnect two nodes
|
||||
GET /nodes/:nodeid/rpc Make RPC requests to a node via WebSocket
|
||||
```
|
||||
|
||||
For convenience, `nodeid` in the URL can be the name of a node rather than its
|
||||
ID.
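As a rough illustration, the endpoints can be hit with any HTTP client; the
listen address below is an assumption for the sake of the example:

```go
// Query the network information endpoint (GET /).
resp, err := http.Get("http://localhost:8888/") // address is an assumption
if err != nil {
	panic(err) // a real client would handle this properly
}
defer resp.Body.Close()
info, _ := ioutil.ReadAll(resp.Body)
fmt.Printf("network info: %s\n", info)
```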
|
||||
|
||||
## Command line client
|
||||
|
||||
`p2psim` is a command line client for the HTTP API, located in
|
||||
`cmd/p2psim`.
|
||||
|
||||
It provides the following commands:
|
||||
|
||||
```
|
||||
p2psim show
|
||||
p2psim events [--current] [--filter=FILTER]
|
||||
p2psim snapshot
|
||||
p2psim load
|
||||
p2psim node create [--name=NAME] [--services=SERVICES] [--key=KEY]
|
||||
p2psim node list
|
||||
p2psim node show <node>
|
||||
p2psim node start <node>
|
||||
p2psim node stop <node>
|
||||
p2psim node connect <node> <peer>
|
||||
p2psim node disconnect <node> <peer>
|
||||
p2psim node rpc <node> <method> [<args>] [--subscribe]
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
See [p2p/simulations/examples/README.md](examples/README.md).
|
182
vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/docker.go
generated
vendored
Normal file
182
vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/docker.go
generated
vendored
Normal file
|
@ -0,0 +1,182 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package adapters
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
)
|
||||
|
||||
// DockerAdapter is a NodeAdapter which runs simulation nodes inside Docker
|
||||
// containers.
|
||||
//
|
||||
// A Docker image is built which contains the current binary at /bin/p2p-node
|
||||
// which when executed runs the underlying service (see the description
|
||||
// of the execP2PNode function for more details)
|
||||
type DockerAdapter struct {
|
||||
ExecAdapter
|
||||
}
|
||||
|
||||
// NewDockerAdapter builds the p2p-node Docker image containing the current
|
||||
// binary and returns a DockerAdapter
|
||||
func NewDockerAdapter() (*DockerAdapter, error) {
|
||||
// Since Docker containers run on Linux and this adapter runs the
|
||||
// current binary in the container, it must be compiled for Linux.
|
||||
//
|
||||
// It is reasonable to require this because the caller can just
|
||||
// compile the current binary in a Docker container.
|
||||
if runtime.GOOS != "linux" {
|
||||
return nil, errors.New("DockerAdapter can only be used on Linux as it uses the current binary (which must be a Linux binary)")
|
||||
}
|
||||
|
||||
if err := buildDockerImage(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &DockerAdapter{
|
||||
ExecAdapter{
|
||||
nodes: make(map[discover.NodeID]*ExecNode),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name returns the name of the adapter for logging purposes
|
||||
func (d *DockerAdapter) Name() string {
|
||||
return "docker-adapter"
|
||||
}
|
||||
|
||||
// NewNode returns a new DockerNode using the given config
|
||||
func (d *DockerAdapter) NewNode(config *NodeConfig) (Node, error) {
|
||||
if len(config.Services) == 0 {
|
||||
return nil, errors.New("node must have at least one service")
|
||||
}
|
||||
for _, service := range config.Services {
|
||||
if _, exists := serviceFuncs[service]; !exists {
|
||||
return nil, fmt.Errorf("unknown node service %q", service)
|
||||
}
|
||||
}
|
||||
|
||||
// generate the config
|
||||
conf := &execNodeConfig{
|
||||
Stack: node.DefaultConfig,
|
||||
Node: config,
|
||||
}
|
||||
conf.Stack.DataDir = "/data"
|
||||
conf.Stack.WSHost = "0.0.0.0"
|
||||
conf.Stack.WSOrigins = []string{"*"}
|
||||
conf.Stack.WSExposeAll = true
|
||||
conf.Stack.P2P.EnableMsgEvents = false
|
||||
conf.Stack.P2P.NoDiscovery = true
|
||||
conf.Stack.P2P.NAT = nil
|
||||
conf.Stack.NoUSB = true
|
||||
|
||||
node := &DockerNode{
|
||||
ExecNode: ExecNode{
|
||||
ID: config.ID,
|
||||
Config: conf,
|
||||
adapter: &d.ExecAdapter,
|
||||
},
|
||||
}
|
||||
node.newCmd = node.dockerCommand
|
||||
d.ExecAdapter.nodes[node.ID] = &node.ExecNode
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// DockerNode wraps an ExecNode but exec's the current binary in a docker
|
||||
// container rather than locally
|
||||
type DockerNode struct {
|
||||
ExecNode
|
||||
}
|
||||
|
||||
// dockerCommand returns a command which exec's the binary in a Docker
|
||||
// container.
|
||||
//
|
||||
// It uses a shell so that we can pass the _P2P_NODE_CONFIG environment
|
||||
// variable to the container using the --env flag.
|
||||
func (n *DockerNode) dockerCommand() *exec.Cmd {
|
||||
return exec.Command(
|
||||
"sh", "-c",
|
||||
fmt.Sprintf(
|
||||
`exec docker run --interactive --env _P2P_NODE_CONFIG="${_P2P_NODE_CONFIG}" %s p2p-node %s %s`,
|
||||
dockerImage, strings.Join(n.Config.Node.Services, ","), n.ID.String(),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// dockerImage is the name of the Docker image which gets built to run the
|
||||
// simulation node
|
||||
const dockerImage = "p2p-node"
|
||||
|
||||
// buildDockerImage builds the Docker image which is used to run the simulation
|
||||
// node in a Docker container.
|
||||
//
|
||||
// It adds the current binary as "p2p-node" so that it runs execP2PNode
|
||||
// when executed.
|
||||
func buildDockerImage() error {
|
||||
// create a directory to use as the build context
|
||||
dir, err := ioutil.TempDir("", "p2p-docker")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
// copy the current binary into the build context
|
||||
bin, err := os.Open(reexec.Self())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer bin.Close()
|
||||
dst, err := os.OpenFile(filepath.Join(dir, "self.bin"), os.O_WRONLY|os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dst.Close()
|
||||
if _, err := io.Copy(dst, bin); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create the Dockerfile
|
||||
dockerfile := []byte(`
|
||||
FROM ubuntu:16.04
|
||||
RUN mkdir /data
|
||||
ADD self.bin /bin/p2p-node
|
||||
`)
|
||||
if err := ioutil.WriteFile(filepath.Join(dir, "Dockerfile"), dockerfile, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// run 'docker build'
|
||||
cmd := exec.Command("docker", "build", "-t", dockerImage, dir)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("error building docker image: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
504
vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go
generated
vendored
Normal file
504
vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go
generated
vendored
Normal file
|
@ -0,0 +1,504 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package adapters
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
// ExecAdapter is a NodeAdapter which runs simulation nodes by executing the
|
||||
// current binary as a child process.
|
||||
//
|
||||
// An init hook is used so that the child process executes the node services
|
||||
// (rather than whatever the main() function would normally do), see the
|
||||
// execP2PNode function for more information.
|
||||
type ExecAdapter struct {
|
||||
// BaseDir is the directory under which the data directories for each
|
||||
// simulation node are created.
|
||||
BaseDir string
|
||||
|
||||
nodes map[discover.NodeID]*ExecNode
|
||||
}
|
||||
|
||||
// NewExecAdapter returns an ExecAdapter which stores node data in
|
||||
// subdirectories of the given base directory
|
||||
func NewExecAdapter(baseDir string) *ExecAdapter {
|
||||
return &ExecAdapter{
|
||||
BaseDir: baseDir,
|
||||
nodes: make(map[discover.NodeID]*ExecNode),
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the adapter for logging purposes
|
||||
func (e *ExecAdapter) Name() string {
|
||||
return "exec-adapter"
|
||||
}
|
||||
|
||||
// NewNode returns a new ExecNode using the given config
|
||||
func (e *ExecAdapter) NewNode(config *NodeConfig) (Node, error) {
|
||||
if len(config.Services) == 0 {
|
||||
return nil, errors.New("node must have at least one service")
|
||||
}
|
||||
for _, service := range config.Services {
|
||||
if _, exists := serviceFuncs[service]; !exists {
|
||||
return nil, fmt.Errorf("unknown node service %q", service)
|
||||
}
|
||||
}
|
||||
|
||||
// create the node directory using the first 12 characters of the ID
|
||||
// as Unix socket paths cannot be longer than 256 characters
|
||||
dir := filepath.Join(e.BaseDir, config.ID.String()[:12])
|
||||
if err := os.Mkdir(dir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("error creating node directory: %s", err)
|
||||
}
|
||||
|
||||
// generate the config
|
||||
conf := &execNodeConfig{
|
||||
Stack: node.DefaultConfig,
|
||||
Node: config,
|
||||
}
|
||||
conf.Stack.DataDir = filepath.Join(dir, "data")
|
||||
conf.Stack.WSHost = "127.0.0.1"
|
||||
conf.Stack.WSPort = 0
|
||||
conf.Stack.WSOrigins = []string{"*"}
|
||||
conf.Stack.WSExposeAll = true
|
||||
conf.Stack.P2P.EnableMsgEvents = false
|
||||
conf.Stack.P2P.NoDiscovery = true
|
||||
conf.Stack.P2P.NAT = nil
|
||||
conf.Stack.NoUSB = true
|
||||
|
||||
// listen on a random localhost port (we'll get the actual port after
|
||||
// starting the node through the RPC admin.nodeInfo method)
|
||||
conf.Stack.P2P.ListenAddr = "127.0.0.1:0"
|
||||
|
||||
node := &ExecNode{
|
||||
ID: config.ID,
|
||||
Dir: dir,
|
||||
Config: conf,
|
||||
adapter: e,
|
||||
}
|
||||
node.newCmd = node.execCommand
|
||||
e.nodes[node.ID] = node
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// ExecNode starts a simulation node by exec'ing the current binary and
|
||||
// running the configured services
|
||||
type ExecNode struct {
|
||||
ID discover.NodeID
|
||||
Dir string
|
||||
Config *execNodeConfig
|
||||
Cmd *exec.Cmd
|
||||
Info *p2p.NodeInfo
|
||||
|
||||
adapter *ExecAdapter
|
||||
client *rpc.Client
|
||||
wsAddr string
|
||||
newCmd func() *exec.Cmd
|
||||
key *ecdsa.PrivateKey
|
||||
}
|
||||
|
||||
// Addr returns the node's enode URL
|
||||
func (n *ExecNode) Addr() []byte {
|
||||
if n.Info == nil {
|
||||
return nil
|
||||
}
|
||||
return []byte(n.Info.Enode)
|
||||
}
|
||||
|
||||
// Client returns an rpc.Client which can be used to communicate with the
|
||||
// underlying services (it is set once the node has started)
|
||||
func (n *ExecNode) Client() (*rpc.Client, error) {
|
||||
return n.client, nil
|
||||
}
|
||||
|
||||
// wsAddrPattern is a regex used to read the WebSocket address from the node's
|
||||
// log
|
||||
var wsAddrPattern = regexp.MustCompile(`ws://[\d.:]+`)
|
||||
|
||||
// Start exec's the node passing the ID and service as command line arguments
|
||||
// and the node config encoded as JSON in the _P2P_NODE_CONFIG environment
|
||||
// variable
|
||||
func (n *ExecNode) Start(snapshots map[string][]byte) (err error) {
|
||||
if n.Cmd != nil {
|
||||
return errors.New("already started")
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
log.Error("node failed to start", "err", err)
|
||||
n.Stop()
|
||||
}
|
||||
}()
|
||||
|
||||
// encode a copy of the config containing the snapshot
|
||||
confCopy := *n.Config
|
||||
confCopy.Snapshots = snapshots
|
||||
confCopy.PeerAddrs = make(map[string]string)
|
||||
for id, node := range n.adapter.nodes {
|
||||
confCopy.PeerAddrs[id.String()] = node.wsAddr
|
||||
}
|
||||
confData, err := json.Marshal(confCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating node config: %s", err)
|
||||
}
|
||||
|
||||
// use a pipe for stderr so we can both copy the node's stderr to
|
||||
// os.Stderr and read the WebSocket address from the logs
|
||||
stderrR, stderrW := io.Pipe()
|
||||
stderr := io.MultiWriter(os.Stderr, stderrW)
|
||||
|
||||
// start the node
|
||||
cmd := n.newCmd()
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = stderr
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("_P2P_NODE_CONFIG=%s", confData))
|
||||
if err := cmd.Start(); err != nil {
|
||||
return fmt.Errorf("error starting node: %s", err)
|
||||
}
|
||||
n.Cmd = cmd
|
||||
|
||||
// read the WebSocket address from the stderr logs
|
||||
var wsAddr string
|
||||
wsAddrC := make(chan string)
|
||||
go func() {
|
||||
s := bufio.NewScanner(stderrR)
|
||||
for s.Scan() {
|
||||
if strings.Contains(s.Text(), "WebSocket endpoint opened:") {
|
||||
wsAddrC <- wsAddrPattern.FindString(s.Text())
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case wsAddr = <-wsAddrC:
|
||||
if wsAddr == "" {
|
||||
return errors.New("failed to read WebSocket address from stderr")
|
||||
}
|
||||
case <-time.After(10 * time.Second):
|
||||
return errors.New("timed out waiting for WebSocket address on stderr")
|
||||
}
|
||||
|
||||
// create the RPC client and load the node info
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
client, err := rpc.DialWebsocket(ctx, wsAddr, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error dialing rpc websocket: %s", err)
|
||||
}
|
||||
var info p2p.NodeInfo
|
||||
if err := client.CallContext(ctx, &info, "admin_nodeInfo"); err != nil {
|
||||
return fmt.Errorf("error getting node info: %s", err)
|
||||
}
|
||||
n.client = client
|
||||
n.wsAddr = wsAddr
|
||||
n.Info = &info
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// execCommand returns a command which runs the node locally by exec'ing
|
||||
// the current binary but setting argv[0] to "p2p-node" so that the child
|
||||
// runs execP2PNode
|
||||
func (n *ExecNode) execCommand() *exec.Cmd {
|
||||
return &exec.Cmd{
|
||||
Path: reexec.Self(),
|
||||
Args: []string{"p2p-node", strings.Join(n.Config.Node.Services, ","), n.ID.String()},
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the node by first sending SIGTERM and then SIGKILL if the node
|
||||
// doesn't stop within 5s
|
||||
func (n *ExecNode) Stop() error {
|
||||
if n.Cmd == nil {
|
||||
return nil
|
||||
}
|
||||
defer func() {
|
||||
n.Cmd = nil
|
||||
}()
|
||||
|
||||
if n.client != nil {
|
||||
n.client.Close()
|
||||
n.client = nil
|
||||
n.wsAddr = ""
|
||||
n.Info = nil
|
||||
}
|
||||
|
||||
if err := n.Cmd.Process.Signal(syscall.SIGTERM); err != nil {
|
||||
return n.Cmd.Process.Kill()
|
||||
}
|
||||
waitErr := make(chan error)
|
||||
go func() {
|
||||
waitErr <- n.Cmd.Wait()
|
||||
}()
|
||||
select {
|
||||
case err := <-waitErr:
|
||||
return err
|
||||
case <-time.After(5 * time.Second):
|
||||
return n.Cmd.Process.Kill()
|
||||
}
|
||||
}
|
||||
|
||||
// NodeInfo returns information about the node
|
||||
func (n *ExecNode) NodeInfo() *p2p.NodeInfo {
|
||||
info := &p2p.NodeInfo{
|
||||
ID: n.ID.String(),
|
||||
}
|
||||
if n.client != nil {
|
||||
n.client.Call(&info, "admin_nodeInfo")
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
// ServeRPC serves RPC requests over the given connection by dialling the
|
||||
// node's WebSocket address and joining the two connections
|
||||
func (n *ExecNode) ServeRPC(clientConn net.Conn) error {
|
||||
conn, err := websocket.Dial(n.wsAddr, "", "http://localhost")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
join := func(src, dst net.Conn) {
|
||||
defer wg.Done()
|
||||
io.Copy(dst, src)
|
||||
// close the write end of the destination connection
|
||||
if cw, ok := dst.(interface {
|
||||
CloseWrite() error
|
||||
}); ok {
|
||||
cw.CloseWrite()
|
||||
} else {
|
||||
dst.Close()
|
||||
}
|
||||
}
|
||||
go join(conn, clientConn)
|
||||
go join(clientConn, conn)
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Snapshots creates snapshots of the services by calling the
|
||||
// simulation_snapshot RPC method
|
||||
func (n *ExecNode) Snapshots() (map[string][]byte, error) {
|
||||
if n.client == nil {
|
||||
return nil, errors.New("RPC not started")
|
||||
}
|
||||
var snapshots map[string][]byte
|
||||
return snapshots, n.client.Call(&snapshots, "simulation_snapshot")
|
||||
}
|
||||
|
||||
func init() {
|
||||
// register a reexec function to start a devp2p node when the current
|
||||
// binary is executed as "p2p-node"
|
||||
reexec.Register("p2p-node", execP2PNode)
|
||||
}
|
||||
|
||||
// execNodeConfig is used to serialize the node configuration so it can be
|
||||
// passed to the child process as a JSON encoded environment variable
|
||||
type execNodeConfig struct {
|
||||
Stack node.Config `json:"stack"`
|
||||
Node *NodeConfig `json:"node"`
|
||||
Snapshots map[string][]byte `json:"snapshots,omitempty"`
|
||||
PeerAddrs map[string]string `json:"peer_addrs,omitempty"`
|
||||
}
|
||||
|
||||
// execP2PNode starts a devp2p node when the current binary is executed with
|
||||
// argv[0] being "p2p-node", reading the service / ID from argv[1] / argv[2]
|
||||
// and the node config from the _P2P_NODE_CONFIG environment variable
|
||||
func execP2PNode() {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
// read the services from argv
|
||||
serviceNames := strings.Split(os.Args[1], ",")
|
||||
|
||||
// decode the config
|
||||
confEnv := os.Getenv("_P2P_NODE_CONFIG")
|
||||
if confEnv == "" {
|
||||
log.Crit("missing _P2P_NODE_CONFIG")
|
||||
}
|
||||
var conf execNodeConfig
|
||||
if err := json.Unmarshal([]byte(confEnv), &conf); err != nil {
|
||||
log.Crit("error decoding _P2P_NODE_CONFIG", "err", err)
|
||||
}
|
||||
conf.Stack.P2P.PrivateKey = conf.Node.PrivateKey
|
||||
|
||||
// use explicit IP address in ListenAddr so that Enode URL is usable
|
||||
externalIP := func() string {
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
log.Crit("error getting IP address", "err", err)
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
if ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() {
|
||||
return ip.IP.String()
|
||||
}
|
||||
}
|
||||
log.Crit("unable to determine explicit IP address")
|
||||
return ""
|
||||
}
|
||||
if strings.HasPrefix(conf.Stack.P2P.ListenAddr, ":") {
|
||||
conf.Stack.P2P.ListenAddr = externalIP() + conf.Stack.P2P.ListenAddr
|
||||
}
|
||||
if conf.Stack.WSHost == "0.0.0.0" {
|
||||
conf.Stack.WSHost = externalIP()
|
||||
}
|
||||
|
||||
// initialize the devp2p stack
|
||||
stack, err := node.New(&conf.Stack)
|
||||
if err != nil {
|
||||
log.Crit("error creating node stack", "err", err)
|
||||
}
|
||||
|
||||
// register the services, collecting them into a map so we can wrap
|
||||
// them in a snapshot service
|
||||
services := make(map[string]node.Service, len(serviceNames))
|
||||
for _, name := range serviceNames {
|
||||
serviceFunc, exists := serviceFuncs[name]
|
||||
if !exists {
|
||||
log.Crit("unknown node service", "name", name)
|
||||
}
|
||||
constructor := func(nodeCtx *node.ServiceContext) (node.Service, error) {
|
||||
ctx := &ServiceContext{
|
||||
RPCDialer: &wsRPCDialer{addrs: conf.PeerAddrs},
|
||||
NodeContext: nodeCtx,
|
||||
Config: conf.Node,
|
||||
}
|
||||
if conf.Snapshots != nil {
|
||||
ctx.Snapshot = conf.Snapshots[name]
|
||||
}
|
||||
service, err := serviceFunc(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
services[name] = service
|
||||
return service, nil
|
||||
}
|
||||
if err := stack.Register(constructor); err != nil {
|
||||
log.Crit("error starting service", "name", name, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// register the snapshot service
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return &snapshotService{services}, nil
|
||||
}); err != nil {
|
||||
log.Crit("error starting snapshot service", "err", err)
|
||||
}
|
||||
|
||||
// start the stack
|
||||
if err := stack.Start(); err != nil {
|
||||
log.Crit("error stating node stack", "err", err)
|
||||
}
|
||||
|
||||
// stop the stack if we get a SIGTERM signal
|
||||
go func() {
|
||||
sigc := make(chan os.Signal, 1)
|
||||
signal.Notify(sigc, syscall.SIGTERM)
|
||||
defer signal.Stop(sigc)
|
||||
<-sigc
|
||||
log.Info("Received SIGTERM, shutting down...")
|
||||
stack.Stop()
|
||||
}()
|
||||
|
||||
// wait for the stack to exit
|
||||
stack.Wait()
|
||||
}
|
||||
|
||||
// snapshotService is a node.Service which wraps a list of services and
|
||||
// exposes an API to generate a snapshot of those services
|
||||
type snapshotService struct {
|
||||
services map[string]node.Service
|
||||
}
|
||||
|
||||
func (s *snapshotService) APIs() []rpc.API {
|
||||
return []rpc.API{{
|
||||
Namespace: "simulation",
|
||||
Version: "1.0",
|
||||
Service: SnapshotAPI{s.services},
|
||||
}}
|
||||
}
|
||||
|
||||
func (s *snapshotService) Protocols() []p2p.Protocol {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *snapshotService) Start(*p2p.Server) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *snapshotService) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SnapshotAPI provides an RPC method to create snapshots of services
|
||||
type SnapshotAPI struct {
|
||||
services map[string]node.Service
|
||||
}
|
||||
|
||||
func (api SnapshotAPI) Snapshot() (map[string][]byte, error) {
|
||||
snapshots := make(map[string][]byte)
|
||||
for name, service := range api.services {
|
||||
if s, ok := service.(interface {
|
||||
Snapshot() ([]byte, error)
|
||||
}); ok {
|
||||
snap, err := s.Snapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
snapshots[name] = snap
|
||||
}
|
||||
}
|
||||
return snapshots, nil
|
||||
}
|
||||
|
||||
type wsRPCDialer struct {
|
||||
addrs map[string]string
|
||||
}
|
||||
|
||||
// DialRPC implements the RPCDialer interface by creating a WebSocket RPC
|
||||
// client of the given node
|
||||
func (w *wsRPCDialer) DialRPC(id discover.NodeID) (*rpc.Client, error) {
|
||||
addr, ok := w.addrs[id.String()]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown node: %s", id)
|
||||
}
|
||||
return rpc.DialWebsocket(context.Background(), addr, "http://localhost")
|
||||
}
|
314
vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/inproc.go
generated
vendored
Normal file
314
vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/inproc.go
generated
vendored
Normal file
|
@ -0,0 +1,314 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package adapters
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
// SimAdapter is a NodeAdapter which creates in-memory simulation nodes and
|
||||
// connects them using in-memory net.Pipe connections
|
||||
type SimAdapter struct {
|
||||
mtx sync.RWMutex
|
||||
nodes map[discover.NodeID]*SimNode
|
||||
services map[string]ServiceFunc
|
||||
}
|
||||
|
||||
// NewSimAdapter creates a SimAdapter which is capable of running in-memory
|
||||
// simulation nodes running any of the given services (the services to run on a
|
||||
// particular node are passed to the NewNode function in the NodeConfig)
|
||||
func NewSimAdapter(services map[string]ServiceFunc) *SimAdapter {
|
||||
return &SimAdapter{
|
||||
nodes: make(map[discover.NodeID]*SimNode),
|
||||
services: services,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the adapter for logging purposes
|
||||
func (s *SimAdapter) Name() string {
|
||||
return "sim-adapter"
|
||||
}
|
||||
|
||||
// NewNode returns a new SimNode using the given config
|
||||
func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) {
|
||||
s.mtx.Lock()
|
||||
defer s.mtx.Unlock()
|
||||
|
||||
// check a node with the ID doesn't already exist
|
||||
id := config.ID
|
||||
if _, exists := s.nodes[id]; exists {
|
||||
return nil, fmt.Errorf("node already exists: %s", id)
|
||||
}
|
||||
|
||||
// check the services are valid
|
||||
if len(config.Services) == 0 {
|
||||
return nil, errors.New("node must have at least one service")
|
||||
}
|
||||
for _, service := range config.Services {
|
||||
if _, exists := s.services[service]; !exists {
|
||||
return nil, fmt.Errorf("unknown node service %q", service)
|
||||
}
|
||||
}
|
||||
|
||||
n, err := node.New(&node.Config{
|
||||
P2P: p2p.Config{
|
||||
PrivateKey: config.PrivateKey,
|
||||
MaxPeers: math.MaxInt32,
|
||||
NoDiscovery: true,
|
||||
Dialer: s,
|
||||
EnableMsgEvents: true,
|
||||
},
|
||||
NoUSB: true,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
simNode := &SimNode{
|
||||
ID: id,
|
||||
config: config,
|
||||
node: n,
|
||||
adapter: s,
|
||||
running: make(map[string]node.Service),
|
||||
}
|
||||
s.nodes[id] = simNode
|
||||
return simNode, nil
|
||||
}
|
||||
|
||||
// Dial implements the p2p.NodeDialer interface by connecting to the node using
|
||||
// an in-memory net.Pipe connection
|
||||
func (s *SimAdapter) Dial(dest *discover.Node) (conn net.Conn, err error) {
|
||||
node, ok := s.GetNode(dest.ID)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown node: %s", dest.ID)
|
||||
}
|
||||
srv := node.Server()
|
||||
if srv == nil {
|
||||
return nil, fmt.Errorf("node not running: %s", dest.ID)
|
||||
}
|
||||
pipe1, pipe2 := net.Pipe()
|
||||
go srv.SetupConn(pipe1, 0, nil)
|
||||
return pipe2, nil
|
||||
}
|
||||
|
||||
// DialRPC implements the RPCDialer interface by creating an in-memory RPC
|
||||
// client of the given node
|
||||
func (s *SimAdapter) DialRPC(id discover.NodeID) (*rpc.Client, error) {
|
||||
node, ok := s.GetNode(id)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown node: %s", id)
|
||||
}
|
||||
handler, err := node.node.RPCHandler()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rpc.DialInProc(handler), nil
|
||||
}
|
||||
|
||||
// GetNode returns the node with the given ID if it exists
|
||||
func (s *SimAdapter) GetNode(id discover.NodeID) (*SimNode, bool) {
|
||||
s.mtx.RLock()
|
||||
defer s.mtx.RUnlock()
|
||||
node, ok := s.nodes[id]
|
||||
return node, ok
|
||||
}
|
||||
|
||||
// SimNode is an in-memory simulation node which connects to other nodes using
|
||||
// an in-memory net.Pipe connection (see SimAdapter.Dial), running devp2p
|
||||
// protocols directly over that pipe
|
||||
type SimNode struct {
|
||||
lock sync.RWMutex
|
||||
ID discover.NodeID
|
||||
config *NodeConfig
|
||||
adapter *SimAdapter
|
||||
node *node.Node
|
||||
running map[string]node.Service
|
||||
client *rpc.Client
|
||||
registerOnce sync.Once
|
||||
}
|
||||
|
||||
// Addr returns the node's discovery address
|
||||
func (self *SimNode) Addr() []byte {
|
||||
return []byte(self.Node().String())
|
||||
}
|
||||
|
||||
// Node returns a discover.Node representing the SimNode
|
||||
func (self *SimNode) Node() *discover.Node {
|
||||
return discover.NewNode(self.ID, net.IP{127, 0, 0, 1}, 30303, 30303)
|
||||
}
|
||||
|
||||
// Client returns an rpc.Client which can be used to communicate with the
|
||||
// underlying services (it is set once the node has started)
|
||||
func (self *SimNode) Client() (*rpc.Client, error) {
|
||||
self.lock.RLock()
|
||||
defer self.lock.RUnlock()
|
||||
if self.client == nil {
|
||||
return nil, errors.New("node not started")
|
||||
}
|
||||
return self.client, nil
|
||||
}
|
||||
|
||||
// ServeRPC serves RPC requests over the given connection by creating an
|
||||
// in-memory client to the node's RPC server
|
||||
func (self *SimNode) ServeRPC(conn net.Conn) error {
|
||||
handler, err := self.node.RPCHandler()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Snapshots creates snapshots of the services by calling the
|
||||
// simulation_snapshot RPC method
|
||||
func (self *SimNode) Snapshots() (map[string][]byte, error) {
|
||||
self.lock.RLock()
|
||||
services := make(map[string]node.Service, len(self.running))
|
||||
for name, service := range self.running {
|
||||
services[name] = service
|
||||
}
|
||||
self.lock.RUnlock()
|
||||
if len(services) == 0 {
|
||||
return nil, errors.New("no running services")
|
||||
}
|
||||
snapshots := make(map[string][]byte)
|
||||
for name, service := range services {
|
||||
if s, ok := service.(interface {
|
||||
Snapshot() ([]byte, error)
|
||||
}); ok {
|
||||
snap, err := s.Snapshot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
snapshots[name] = snap
|
||||
}
|
||||
}
|
||||
return snapshots, nil
|
||||
}
|
||||
|
||||
// Start registers the services and starts the underlying devp2p node
|
||||
func (self *SimNode) Start(snapshots map[string][]byte) error {
|
||||
newService := func(name string) func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return func(nodeCtx *node.ServiceContext) (node.Service, error) {
|
||||
ctx := &ServiceContext{
|
||||
RPCDialer: self.adapter,
|
||||
NodeContext: nodeCtx,
|
||||
Config: self.config,
|
||||
}
|
||||
if snapshots != nil {
|
||||
ctx.Snapshot = snapshots[name]
|
||||
}
|
||||
serviceFunc := self.adapter.services[name]
|
||||
service, err := serviceFunc(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
self.running[name] = service
|
||||
return service, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ensure we only register the services once in the case of the node
|
||||
// being stopped and then started again
|
||||
var regErr error
|
||||
self.registerOnce.Do(func() {
|
||||
for _, name := range self.config.Services {
|
||||
if err := self.node.Register(newService(name)); err != nil {
|
||||
regErr = err
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
if regErr != nil {
|
||||
return regErr
|
||||
}
|
||||
|
||||
if err := self.node.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create an in-process RPC client
|
||||
handler, err := self.node.RPCHandler()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.lock.Lock()
|
||||
self.client = rpc.DialInProc(handler)
|
||||
self.lock.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop closes the RPC client and stops the underlying devp2p node
|
||||
func (self *SimNode) Stop() error {
|
||||
self.lock.Lock()
|
||||
if self.client != nil {
|
||||
self.client.Close()
|
||||
self.client = nil
|
||||
}
|
||||
self.lock.Unlock()
|
||||
return self.node.Stop()
|
||||
}
|
||||
|
||||
// Services returns a copy of the underlying services
|
||||
func (self *SimNode) Services() []node.Service {
|
||||
self.lock.RLock()
|
||||
defer self.lock.RUnlock()
|
||||
services := make([]node.Service, 0, len(self.running))
|
||||
for _, service := range self.running {
|
||||
services = append(services, service)
|
||||
}
|
||||
return services
|
||||
}
|
||||
|
||||
// Server returns the underlying p2p.Server
|
||||
func (self *SimNode) Server() *p2p.Server {
|
||||
return self.node.Server()
|
||||
}
|
||||
|
||||
// SubscribeEvents subscribes the given channel to peer events from the
|
||||
// underlying p2p.Server
|
||||
func (self *SimNode) SubscribeEvents(ch chan *p2p.PeerEvent) event.Subscription {
|
||||
srv := self.Server()
|
||||
if srv == nil {
|
||||
panic("node not running")
|
||||
}
|
||||
return srv.SubscribeEvents(ch)
|
||||
}
|
||||
|
||||
// NodeInfo returns information about the node
|
||||
func (self *SimNode) NodeInfo() *p2p.NodeInfo {
|
||||
server := self.Server()
|
||||
if server == nil {
|
||||
return &p2p.NodeInfo{
|
||||
ID: self.ID.String(),
|
||||
Enode: self.Node().String(),
|
||||
}
|
||||
}
|
||||
return server.NodeInfo()
|
||||
}
|
215
vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/types.go
generated
vendored
Normal file
|
@ -0,0 +1,215 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package adapters
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
// Node represents a node in a simulation network which is created by a
|
||||
// NodeAdapter, for example:
|
||||
//
|
||||
// * SimNode - An in-memory node
|
||||
// * ExecNode - A child process node
|
||||
// * DockerNode - A Docker container node
|
||||
//
|
||||
type Node interface {
|
||||
// Addr returns the node's address (e.g. an Enode URL)
|
||||
Addr() []byte
|
||||
|
||||
// Client returns the RPC client which is created once the node is
|
||||
// up and running
|
||||
Client() (*rpc.Client, error)
|
||||
|
||||
// ServeRPC serves RPC requests over the given connection
|
||||
ServeRPC(net.Conn) error
|
||||
|
||||
// Start starts the node with the given snapshots
|
||||
Start(snapshots map[string][]byte) error
|
||||
|
||||
// Stop stops the node
|
||||
Stop() error
|
||||
|
||||
// NodeInfo returns information about the node
|
||||
NodeInfo() *p2p.NodeInfo
|
||||
|
||||
// Snapshots creates snapshots of the running services
|
||||
Snapshots() (map[string][]byte, error)
|
||||
}
|
||||
|
||||
// NodeAdapter is used to create Nodes in a simulation network
|
||||
type NodeAdapter interface {
|
||||
// Name returns the name of the adapter for logging purposes
|
||||
Name() string
|
||||
|
||||
// NewNode creates a new node with the given configuration
|
||||
NewNode(config *NodeConfig) (Node, error)
|
||||
}
|
||||
|
||||
// NodeConfig is the configuration used to start a node in a simulation
|
||||
// network
|
||||
type NodeConfig struct {
|
||||
// ID is the node's ID which is used to identify the node in the
|
||||
// simulation network
|
||||
ID discover.NodeID
|
||||
|
||||
// PrivateKey is the node's private key which is used by the devp2p
|
||||
// stack to encrypt communications
|
||||
PrivateKey *ecdsa.PrivateKey
|
||||
|
||||
// Name is a human friendly name for the node like "node01"
|
||||
Name string
|
||||
|
||||
// Services are the names of the services which should be run when
|
||||
// starting the node (for SimNodes it should be the names of services
|
||||
// contained in SimAdapter.services, for other nodes it should be
|
||||
// services registered by calling the RegisterService function)
|
||||
Services []string
|
||||
}
|
||||
|
||||
// nodeConfigJSON is used to encode and decode NodeConfig as JSON by encoding
|
||||
// all fields as strings
|
||||
type nodeConfigJSON struct {
|
||||
ID string `json:"id"`
|
||||
PrivateKey string `json:"private_key"`
|
||||
Name string `json:"name"`
|
||||
Services []string `json:"services"`
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface by encoding the config
|
||||
// fields as strings
|
||||
func (n *NodeConfig) MarshalJSON() ([]byte, error) {
|
||||
confJSON := nodeConfigJSON{
|
||||
ID: n.ID.String(),
|
||||
Name: n.Name,
|
||||
Services: n.Services,
|
||||
}
|
||||
if n.PrivateKey != nil {
|
||||
confJSON.PrivateKey = hex.EncodeToString(crypto.FromECDSA(n.PrivateKey))
|
||||
}
|
||||
return json.Marshal(confJSON)
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface by decoding the json
|
||||
// string values into the config fields
|
||||
func (n *NodeConfig) UnmarshalJSON(data []byte) error {
|
||||
var confJSON nodeConfigJSON
|
||||
if err := json.Unmarshal(data, &confJSON); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if confJSON.ID != "" {
|
||||
nodeID, err := discover.HexID(confJSON.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.ID = nodeID
|
||||
}
|
||||
|
||||
if confJSON.PrivateKey != "" {
|
||||
key, err := hex.DecodeString(confJSON.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
privKey, err := crypto.ToECDSA(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.PrivateKey = privKey
|
||||
}
|
||||
|
||||
n.Name = confJSON.Name
|
||||
n.Services = confJSON.Services
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RandomNodeConfig returns node configuration with a randomly generated ID and
|
||||
// PrivateKey
|
||||
func RandomNodeConfig() *NodeConfig {
|
||||
key, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
panic("unable to generate key")
|
||||
}
|
||||
var id discover.NodeID
|
||||
pubkey := crypto.FromECDSAPub(&key.PublicKey)
|
||||
copy(id[:], pubkey[1:])
|
||||
return &NodeConfig{
|
||||
ID: id,
|
||||
PrivateKey: key,
|
||||
}
|
||||
}
|
||||
|
||||
// ServiceContext is a collection of options and methods which can be utilised
|
||||
// when starting services
|
||||
type ServiceContext struct {
|
||||
RPCDialer
|
||||
|
||||
NodeContext *node.ServiceContext
|
||||
Config *NodeConfig
|
||||
Snapshot []byte
|
||||
}
|
||||
|
||||
// RPCDialer is used when initialising services which need to connect to
|
||||
// other nodes in the network (for example a simulated Swarm node which needs
|
||||
// to connect to a Geth node to resolve ENS names)
|
||||
type RPCDialer interface {
|
||||
DialRPC(id discover.NodeID) (*rpc.Client, error)
|
||||
}
|
||||
|
||||
// Services is a collection of services which can be run in a simulation
|
||||
type Services map[string]ServiceFunc
|
||||
|
||||
// ServiceFunc returns a node.Service which can be used to boot a devp2p node
|
||||
type ServiceFunc func(ctx *ServiceContext) (node.Service, error)
|
||||
|
||||
// serviceFuncs is a map of registered services which are used to boot devp2p
|
||||
// nodes
|
||||
var serviceFuncs = make(Services)
|
||||
|
||||
// RegisterServices registers the given Services which can then be used to
|
||||
// start devp2p nodes using either the Exec or Docker adapters.
|
||||
//
|
||||
// It should be called in an init function so that it has the opportunity to
|
||||
// execute the services before main() is called.
|
||||
func RegisterServices(services Services) {
|
||||
for name, f := range services {
|
||||
if _, exists := serviceFuncs[name]; exists {
|
||||
panic(fmt.Sprintf("node service already exists: %q", name))
|
||||
}
|
||||
serviceFuncs[name] = f
|
||||
}
|
||||
|
||||
// now we have registered the services, run reexec.Init() which will
|
||||
// potentially start one of the services if the current binary has
|
||||
// been exec'd with argv[0] set to "p2p-node"
|
||||
if reexec.Init() {
|
||||
os.Exit(0)
|
||||
}
|
||||
}
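
To make the registration flow concrete, here is a minimal, hypothetical sketch of a caller package registering a do-nothing service via RegisterServices from an init function. The "noop" name and noopService type are illustrative only; the ping-pong example later in this diff shows a real service.

```go
package main

import (
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/rpc"
)

// noopService is a hypothetical, do-nothing node.Service used only to
// illustrate registration; a real service would expose protocols and APIs.
type noopService struct{}

func (s *noopService) Protocols() []p2p.Protocol  { return nil }
func (s *noopService) APIs() []rpc.API            { return nil }
func (s *noopService) Start(srv *p2p.Server) error { return nil }
func (s *noopService) Stop() error                 { return nil }

func init() {
	// Register the service before main runs so that the exec/docker adapters
	// can boot it when the binary is re-executed with argv[0] == "p2p-node".
	adapters.RegisterServices(adapters.Services{
		"noop": func(ctx *adapters.ServiceContext) (node.Service, error) {
			return new(noopService), nil
		},
	})
}

func main() {}
```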
|
|
@ -0,0 +1,108 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulations
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EventType is the type of event emitted by a simulation network
|
||||
type EventType string
|
||||
|
||||
const (
|
||||
// EventTypeNode is the type of event emitted when a node is either
|
||||
// created, started or stopped
|
||||
EventTypeNode EventType = "node"
|
||||
|
||||
// EventTypeConn is the type of event emitted when a connection is
|
||||
// either established or dropped between two nodes
|
||||
EventTypeConn EventType = "conn"
|
||||
|
||||
// EventTypeMsg is the type of event emitted when a p2p message is
|
||||
// sent between two nodes
|
||||
EventTypeMsg EventType = "msg"
|
||||
)
|
||||
|
||||
// Event is an event emitted by a simulation network
|
||||
type Event struct {
|
||||
// Type is the type of the event
|
||||
Type EventType `json:"type"`
|
||||
|
||||
// Time is the time the event happened
|
||||
Time time.Time `json:"time"`
|
||||
|
||||
// Control indicates whether the event is the result of a controlled
|
||||
// action in the network
|
||||
Control bool `json:"control"`
|
||||
|
||||
// Node is set if the type is EventTypeNode
|
||||
Node *Node `json:"node,omitempty"`
|
||||
|
||||
// Conn is set if the type is EventTypeConn
|
||||
Conn *Conn `json:"conn,omitempty"`
|
||||
|
||||
// Msg is set if the type is EventTypeMsg
|
||||
Msg *Msg `json:"msg,omitempty"`
|
||||
}
|
||||
|
||||
// NewEvent creates a new event for the given object which should be either a
|
||||
// Node, Conn or Msg.
|
||||
//
|
||||
// The object is copied so that the event represents the state of the object
|
||||
// when NewEvent is called.
|
||||
func NewEvent(v interface{}) *Event {
|
||||
event := &Event{Time: time.Now()}
|
||||
switch v := v.(type) {
|
||||
case *Node:
|
||||
event.Type = EventTypeNode
|
||||
node := *v
|
||||
event.Node = &node
|
||||
case *Conn:
|
||||
event.Type = EventTypeConn
|
||||
conn := *v
|
||||
event.Conn = &conn
|
||||
case *Msg:
|
||||
event.Type = EventTypeMsg
|
||||
msg := *v
|
||||
event.Msg = &msg
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid event type: %T", v))
|
||||
}
|
||||
return event
|
||||
}
|
||||
|
||||
// ControlEvent creates a new control event
|
||||
func ControlEvent(v interface{}) *Event {
|
||||
event := NewEvent(v)
|
||||
event.Control = true
|
||||
return event
|
||||
}
|
||||
|
||||
// String returns the string representation of the event
|
||||
func (e *Event) String() string {
|
||||
switch e.Type {
|
||||
case EventTypeNode:
|
||||
return fmt.Sprintf("<node-event> id: %s up: %t", e.Node.ID().TerminalString(), e.Node.Up)
|
||||
case EventTypeConn:
|
||||
return fmt.Sprintf("<conn-event> nodes: %s->%s up: %t", e.Conn.One.TerminalString(), e.Conn.Other.TerminalString(), e.Conn.Up)
|
||||
case EventTypeMsg:
|
||||
return fmt.Sprintf("<msg-event> nodes: %s->%s proto: %s, code: %d, received: %t", e.Msg.One.TerminalString(), e.Msg.Other.TerminalString(), e.Msg.Protocol, e.Msg.Code, e.Msg.Received)
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
39
vendor/github.com/ethereum/go-ethereum/p2p/simulations/examples/README.md
generated
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
# devp2p simulation examples
|
||||
|
||||
## ping-pong
|
||||
|
||||
`ping-pong.go` implements a simulation network which contains nodes running a
|
||||
simple "ping-pong" protocol where nodes send a ping message to all their
|
||||
connected peers every 10s and receive pong messages in return.
|
||||
|
||||
To run the simulation, run `go run ping-pong.go` in one terminal to start the
|
||||
simulation API and `./ping-pong.sh` in another to start and connect the nodes:
|
||||
|
||||
```
|
||||
$ go run ping-pong.go
|
||||
INFO [08-15|13:53:49] using sim adapter
|
||||
INFO [08-15|13:53:49] starting simulation server on 0.0.0.0:8888...
|
||||
```
|
||||
|
||||
```
|
||||
$ ./ping-pong.sh
|
||||
---> 13:58:12 creating 10 nodes
|
||||
Created node01
|
||||
Started node01
|
||||
...
|
||||
Created node10
|
||||
Started node10
|
||||
---> 13:58:13 connecting node01 to all other nodes
|
||||
Connected node01 to node02
|
||||
...
|
||||
Connected node01 to node10
|
||||
---> 13:58:14 done
|
||||
```
|
||||
|
||||
Use the `--adapter` flag to choose the adapter type:
|
||||
|
||||
```
|
||||
$ go run ping-pong.go --adapter exec
|
||||
INFO [08-15|14:01:14] using exec adapter tmpdir=/var/folders/k6/wpsgfg4n23ddbc6f5cnw5qg00000gn/T/p2p-example992833779
|
||||
INFO [08-15|14:01:14] starting simulation server on 0.0.0.0:8888...
|
||||
```
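
The same API can also be driven programmatically. Below is a hedged sketch using the `simulations` HTTP client from this package; the port mirrors the ping-pong example above and error handling is kept minimal.

```
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

func main() {
	// Talk to the simulation API started by ping-pong.go on localhost:8888.
	client := simulations.NewClient("http://localhost:8888")

	// Create and start one node with a random ID and private key.
	node, err := client.CreateNode(adapters.RandomNodeConfig())
	if err != nil {
		log.Fatalf("create node: %v", err)
	}
	if err := client.StartNode(node.ID); err != nil {
		log.Fatalf("start node: %v", err)
	}
	log.Printf("started %s", node.Name)
}
```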
|
184
vendor/github.com/ethereum/go-ethereum/p2p/simulations/examples/ping-pong.go
generated
vendored
Normal file
|
@ -0,0 +1,184 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
var adapterType = flag.String("adapter", "sim", `node adapter to use (one of "sim", "exec" or "docker")`)
|
||||
|
||||
// main() starts a simulation network which contains nodes running a simple
|
||||
// ping-pong protocol
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
// set the log level to Trace
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
|
||||
|
||||
// register a single ping-pong service
|
||||
services := map[string]adapters.ServiceFunc{
|
||||
"ping-pong": func(ctx *adapters.ServiceContext) (node.Service, error) {
|
||||
return newPingPongService(ctx.Config.ID), nil
|
||||
},
|
||||
}
|
||||
adapters.RegisterServices(services)
|
||||
|
||||
// create the NodeAdapter
|
||||
var adapter adapters.NodeAdapter
|
||||
|
||||
switch *adapterType {
|
||||
|
||||
case "sim":
|
||||
log.Info("using sim adapter")
|
||||
adapter = adapters.NewSimAdapter(services)
|
||||
|
||||
case "exec":
|
||||
tmpdir, err := ioutil.TempDir("", "p2p-example")
|
||||
if err != nil {
|
||||
log.Crit("error creating temp dir", "err", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
log.Info("using exec adapter", "tmpdir", tmpdir)
|
||||
adapter = adapters.NewExecAdapter(tmpdir)
|
||||
|
||||
case "docker":
|
||||
log.Info("using docker adapter")
|
||||
var err error
|
||||
adapter, err = adapters.NewDockerAdapter()
|
||||
if err != nil {
|
||||
log.Crit("error creating docker adapter", "err", err)
|
||||
}
|
||||
|
||||
default:
|
||||
log.Crit(fmt.Sprintf("unknown node adapter %q", *adapterType))
|
||||
}
|
||||
|
||||
// start the HTTP API
|
||||
log.Info("starting simulation server on 0.0.0.0:8888...")
|
||||
network := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
|
||||
DefaultService: "ping-pong",
|
||||
})
|
||||
if err := http.ListenAndServe(":8888", simulations.NewServer(network)); err != nil {
|
||||
log.Crit("error starting simulation server", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// pingPongService runs a ping-pong protocol between nodes where each node
|
||||
// sends a ping to all its connected peers every 10s and receives a pong in
|
||||
// return
|
||||
type pingPongService struct {
|
||||
id discover.NodeID
|
||||
log log.Logger
|
||||
received int64
|
||||
}
|
||||
|
||||
func newPingPongService(id discover.NodeID) *pingPongService {
|
||||
return &pingPongService{
|
||||
id: id,
|
||||
log: log.New("node.id", id),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *pingPongService) Protocols() []p2p.Protocol {
|
||||
return []p2p.Protocol{{
|
||||
Name: "ping-pong",
|
||||
Version: 1,
|
||||
Length: 2,
|
||||
Run: p.Run,
|
||||
NodeInfo: p.Info,
|
||||
}}
|
||||
}
|
||||
|
||||
func (p *pingPongService) APIs() []rpc.API {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pingPongService) Start(server *p2p.Server) error {
|
||||
p.log.Info("ping-pong service starting")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pingPongService) Stop() error {
|
||||
p.log.Info("ping-pong service stopping")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *pingPongService) Info() interface{} {
|
||||
return struct {
|
||||
Received int64 `json:"received"`
|
||||
}{
|
||||
atomic.LoadInt64(&p.received),
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
pingMsgCode = iota
|
||||
pongMsgCode
|
||||
)
|
||||
|
||||
// Run implements the ping-pong protocol which sends ping messages to the peer
|
||||
// at 10s intervals, and responds to pings with pong messages.
|
||||
func (p *pingPongService) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
log := p.log.New("peer.id", peer.ID())
|
||||
|
||||
errC := make(chan error)
|
||||
go func() {
|
||||
for range time.Tick(10 * time.Second) {
|
||||
log.Info("sending ping")
|
||||
if err := p2p.Send(rw, pingMsgCode, "PING"); err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
msg, err := rw.ReadMsg()
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
payload, err := ioutil.ReadAll(msg.Payload)
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
log.Info("received message", "msg.code", msg.Code, "msg.payload", string(payload))
|
||||
atomic.AddInt64(&p.received, 1)
|
||||
if msg.Code == pingMsgCode {
|
||||
log.Info("sending pong")
|
||||
go p2p.Send(rw, pongMsgCode, "PONG")
|
||||
}
|
||||
}
|
||||
}()
|
||||
return <-errC
|
||||
}
|
40
vendor/github.com/ethereum/go-ethereum/p2p/simulations/examples/ping-pong.sh
generated
vendored
Executable file
|
@ -0,0 +1,40 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Boot a ping-pong network simulation using the HTTP API started by ping-pong.go
|
||||
|
||||
set -e
|
||||
|
||||
main() {
|
||||
if ! which p2psim &>/dev/null; then
|
||||
fail "missing p2psim binary (you need to build cmd/p2psim and put it in \$PATH)"
|
||||
fi
|
||||
|
||||
info "creating 10 nodes"
|
||||
for i in $(seq 1 10); do
|
||||
p2psim node create --name "$(node_name $i)"
|
||||
p2psim node start "$(node_name $i)"
|
||||
done
|
||||
|
||||
info "connecting node01 to all other nodes"
|
||||
for i in $(seq 2 10); do
|
||||
p2psim node connect "node01" "$(node_name $i)"
|
||||
done
|
||||
|
||||
info "done"
|
||||
}
|
||||
|
||||
node_name() {
|
||||
local num=$1
|
||||
echo "node$(printf '%02d' $num)"
|
||||
}
|
||||
|
||||
info() {
|
||||
echo -e "\033[1;32m---> $(date +%H:%M:%S) ${@}\033[0m"
|
||||
}
|
||||
|
||||
fail() {
|
||||
echo -e "\033[1;31mERROR: ${@}\033[0m" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
main "$@"
|
|
@ -0,0 +1,680 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulations
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
// DefaultClient is the default simulation API client which expects the API
|
||||
// to be running at http://localhost:8888
|
||||
var DefaultClient = NewClient("http://localhost:8888")
|
||||
|
||||
// Client is a client for the simulation HTTP API which supports creating
|
||||
// and managing simulation networks
|
||||
type Client struct {
|
||||
URL string
|
||||
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// NewClient returns a new simulation API client
|
||||
func NewClient(url string) *Client {
|
||||
return &Client{
|
||||
URL: url,
|
||||
client: http.DefaultClient,
|
||||
}
|
||||
}
|
||||
|
||||
// GetNetwork returns details of the network
|
||||
func (c *Client) GetNetwork() (*Network, error) {
|
||||
network := &Network{}
|
||||
return network, c.Get("/", network)
|
||||
}
|
||||
|
||||
// StartNetwork starts all existing nodes in the simulation network
|
||||
func (c *Client) StartNetwork() error {
|
||||
return c.Post("/start", nil, nil)
|
||||
}
|
||||
|
||||
// StopNetwork stops all existing nodes in a simulation network
|
||||
func (c *Client) StopNetwork() error {
|
||||
return c.Post("/stop", nil, nil)
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a network snapshot
|
||||
func (c *Client) CreateSnapshot() (*Snapshot, error) {
|
||||
snap := &Snapshot{}
|
||||
return snap, c.Get("/snapshot", snap)
|
||||
}
|
||||
|
||||
// LoadSnapshot loads a snapshot into the network
|
||||
func (c *Client) LoadSnapshot(snap *Snapshot) error {
|
||||
return c.Post("/snapshot", snap, nil)
|
||||
}
|
||||
|
||||
// SubscribeOpts is a collection of options to use when subscribing to network
|
||||
// events
|
||||
type SubscribeOpts struct {
|
||||
// Current instructs the server to send events for existing nodes and
|
||||
// connections first
|
||||
Current bool
|
||||
|
||||
// Filter instructs the server to only send a subset of message events
|
||||
Filter string
|
||||
}
|
||||
|
||||
// SubscribeNetwork subscribes to network events which are sent from the server
|
||||
// as a server-sent-events stream, optionally receiving events for existing
|
||||
// nodes and connections and filtering message events
|
||||
func (c *Client) SubscribeNetwork(events chan *Event, opts SubscribeOpts) (event.Subscription, error) {
|
||||
url := fmt.Sprintf("%s/events?current=%t&filter=%s", c.URL, opts.Current, opts.Filter)
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Accept", "text/event-stream")
|
||||
res, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res.StatusCode != http.StatusOK {
|
||||
response, _ := ioutil.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return nil, fmt.Errorf("unexpected HTTP status: %s: %s", res.Status, response)
|
||||
}
|
||||
|
||||
// define a producer function to pass to event.Subscription
|
||||
// which reads server-sent events from res.Body and sends
|
||||
// them to the events channel
|
||||
producer := func(stop <-chan struct{}) error {
|
||||
defer res.Body.Close()
|
||||
|
||||
// read lines from res.Body in a goroutine so that we are
|
||||
// always reading from the stop channel
|
||||
lines := make(chan string)
|
||||
errC := make(chan error, 1)
|
||||
go func() {
|
||||
s := bufio.NewScanner(res.Body)
|
||||
for s.Scan() {
|
||||
select {
|
||||
case lines <- s.Text():
|
||||
case <-stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
errC <- s.Err()
|
||||
}()
|
||||
|
||||
// detect any lines which start with "data:", decode the data
|
||||
// into an event and send it to the events channel
|
||||
for {
|
||||
select {
|
||||
case line := <-lines:
|
||||
if !strings.HasPrefix(line, "data:") {
|
||||
continue
|
||||
}
|
||||
data := strings.TrimSpace(strings.TrimPrefix(line, "data:"))
|
||||
event := &Event{}
|
||||
if err := json.Unmarshal([]byte(data), event); err != nil {
|
||||
return fmt.Errorf("error decoding SSE event: %s", err)
|
||||
}
|
||||
select {
|
||||
case events <- event:
|
||||
case <-stop:
|
||||
return nil
|
||||
}
|
||||
case err := <-errC:
|
||||
return err
|
||||
case <-stop:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return event.NewSubscription(producer), nil
|
||||
}
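
As a usage sketch only (assuming a simulation API is reachable at the DefaultClient URL; the "ping-pong" filter value and the streamPingPong helper name are illustrative, not part of this package):

```go
// streamPingPong subscribes to network events with a filter and prints them
// until the subscription errors or the stop channel closes.
func streamPingPong(stop chan struct{}) error {
	events := make(chan *Event)
	sub, err := DefaultClient.SubscribeNetwork(events, SubscribeOpts{
		Current: true,          // also send existing nodes and connections
		Filter:  "ping-pong:*", // only ping-pong message events (illustrative)
	})
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()
	for {
		select {
		case event := <-events:
			fmt.Println(event)
		case err := <-sub.Err():
			return err
		case <-stop:
			return nil
		}
	}
}
```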
|
||||
|
||||
// GetNodes returns all nodes which exist in the network
|
||||
func (c *Client) GetNodes() ([]*p2p.NodeInfo, error) {
|
||||
var nodes []*p2p.NodeInfo
|
||||
return nodes, c.Get("/nodes", &nodes)
|
||||
}
|
||||
|
||||
// CreateNode creates a node in the network using the given configuration
|
||||
func (c *Client) CreateNode(config *adapters.NodeConfig) (*p2p.NodeInfo, error) {
|
||||
node := &p2p.NodeInfo{}
|
||||
return node, c.Post("/nodes", config, node)
|
||||
}
|
||||
|
||||
// GetNode returns details of a node
|
||||
func (c *Client) GetNode(nodeID string) (*p2p.NodeInfo, error) {
|
||||
node := &p2p.NodeInfo{}
|
||||
return node, c.Get(fmt.Sprintf("/nodes/%s", nodeID), node)
|
||||
}
|
||||
|
||||
// StartNode starts a node
|
||||
func (c *Client) StartNode(nodeID string) error {
|
||||
return c.Post(fmt.Sprintf("/nodes/%s/start", nodeID), nil, nil)
|
||||
}
|
||||
|
||||
// StopNode stops a node
|
||||
func (c *Client) StopNode(nodeID string) error {
|
||||
return c.Post(fmt.Sprintf("/nodes/%s/stop", nodeID), nil, nil)
|
||||
}
|
||||
|
||||
// ConnectNode connects a node to a peer node
|
||||
func (c *Client) ConnectNode(nodeID, peerID string) error {
|
||||
return c.Post(fmt.Sprintf("/nodes/%s/conn/%s", nodeID, peerID), nil, nil)
|
||||
}
|
||||
|
||||
// DisconnectNode disconnects a node from a peer node
|
||||
func (c *Client) DisconnectNode(nodeID, peerID string) error {
|
||||
return c.Delete(fmt.Sprintf("/nodes/%s/conn/%s", nodeID, peerID))
|
||||
}
|
||||
|
||||
// RPCClient returns an RPC client connected to a node
|
||||
func (c *Client) RPCClient(ctx context.Context, nodeID string) (*rpc.Client, error) {
|
||||
baseURL := strings.Replace(c.URL, "http", "ws", 1)
|
||||
return rpc.DialWebsocket(ctx, fmt.Sprintf("%s/nodes/%s/rpc", baseURL, nodeID), "")
|
||||
}
|
||||
|
||||
// Get performs an HTTP GET request, decoding the resulting JSON response
|
||||
// into "out"
|
||||
func (c *Client) Get(path string, out interface{}) error {
|
||||
return c.Send("GET", path, nil, out)
|
||||
}
|
||||
|
||||
// Post performs an HTTP POST request, sending "in" as the JSON body and
|
||||
// decoding the resulting JSON response into "out"
|
||||
func (c *Client) Post(path string, in, out interface{}) error {
|
||||
return c.Send("POST", path, in, out)
|
||||
}
|
||||
|
||||
// Delete performs an HTTP DELETE request
|
||||
func (c *Client) Delete(path string) error {
|
||||
return c.Send("DELETE", path, nil, nil)
|
||||
}
|
||||
|
||||
// Send performs an HTTP request, sending "in" as the JSON request body and
|
||||
// decoding the JSON response into "out"
|
||||
func (c *Client) Send(method, path string, in, out interface{}) error {
|
||||
var body []byte
|
||||
if in != nil {
|
||||
var err error
|
||||
body, err = json.Marshal(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
req, err := http.NewRequest(method, c.URL+path, bytes.NewReader(body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
res, err := c.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
|
||||
response, _ := ioutil.ReadAll(res.Body)
|
||||
return fmt.Errorf("unexpected HTTP status: %s: %s", res.Status, response)
|
||||
}
|
||||
if out != nil {
|
||||
if err := json.NewDecoder(res.Body).Decode(out); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Server is an HTTP server providing an API to manage a simulation network
|
||||
type Server struct {
|
||||
router *httprouter.Router
|
||||
network *Network
|
||||
}
|
||||
|
||||
// NewServer returns a new simulation API server
|
||||
func NewServer(network *Network) *Server {
|
||||
s := &Server{
|
||||
router: httprouter.New(),
|
||||
network: network,
|
||||
}
|
||||
|
||||
s.OPTIONS("/", s.Options)
|
||||
s.GET("/", s.GetNetwork)
|
||||
s.POST("/start", s.StartNetwork)
|
||||
s.POST("/stop", s.StopNetwork)
|
||||
s.GET("/events", s.StreamNetworkEvents)
|
||||
s.GET("/snapshot", s.CreateSnapshot)
|
||||
s.POST("/snapshot", s.LoadSnapshot)
|
||||
s.POST("/nodes", s.CreateNode)
|
||||
s.GET("/nodes", s.GetNodes)
|
||||
s.GET("/nodes/:nodeid", s.GetNode)
|
||||
s.POST("/nodes/:nodeid/start", s.StartNode)
|
||||
s.POST("/nodes/:nodeid/stop", s.StopNode)
|
||||
s.POST("/nodes/:nodeid/conn/:peerid", s.ConnectNode)
|
||||
s.DELETE("/nodes/:nodeid/conn/:peerid", s.DisconnectNode)
|
||||
s.GET("/nodes/:nodeid/rpc", s.NodeRPC)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// GetNetwork returns details of the network
|
||||
func (s *Server) GetNetwork(w http.ResponseWriter, req *http.Request) {
|
||||
s.JSON(w, http.StatusOK, s.network)
|
||||
}
|
||||
|
||||
// StartNetwork starts all nodes in the network
|
||||
func (s *Server) StartNetwork(w http.ResponseWriter, req *http.Request) {
|
||||
if err := s.network.StartAll(); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
// StopNetwork stops all nodes in the network
|
||||
func (s *Server) StopNetwork(w http.ResponseWriter, req *http.Request) {
|
||||
if err := s.network.StopAll(); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
// StreamNetworkEvents streams network events as a server-sent-events stream
|
||||
func (s *Server) StreamNetworkEvents(w http.ResponseWriter, req *http.Request) {
|
||||
events := make(chan *Event)
|
||||
sub := s.network.events.Subscribe(events)
|
||||
defer sub.Unsubscribe()
|
||||
|
||||
// stop the stream if the client goes away
|
||||
var clientGone <-chan bool
|
||||
if cn, ok := w.(http.CloseNotifier); ok {
|
||||
clientGone = cn.CloseNotify()
|
||||
}
|
||||
|
||||
// write writes the given event and data to the stream like:
|
||||
//
|
||||
// event: <event>
|
||||
// data: <data>
|
||||
//
|
||||
write := func(event, data string) {
|
||||
fmt.Fprintf(w, "event: %s\n", event)
|
||||
fmt.Fprintf(w, "data: %s\n\n", data)
|
||||
if fw, ok := w.(http.Flusher); ok {
|
||||
fw.Flush()
|
||||
}
|
||||
}
|
||||
writeEvent := func(event *Event) error {
|
||||
data, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
write("network", string(data))
|
||||
return nil
|
||||
}
|
||||
writeErr := func(err error) {
|
||||
write("error", err.Error())
|
||||
}
|
||||
|
||||
// check if filtering has been requested
|
||||
var filters MsgFilters
|
||||
if filterParam := req.URL.Query().Get("filter"); filterParam != "" {
|
||||
var err error
|
||||
filters, err = NewMsgFilters(filterParam)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, "\n\n")
|
||||
if fw, ok := w.(http.Flusher); ok {
|
||||
fw.Flush()
|
||||
}
|
||||
|
||||
// optionally send the existing nodes and connections
|
||||
if req.URL.Query().Get("current") == "true" {
|
||||
snap, err := s.network.Snapshot()
|
||||
if err != nil {
|
||||
writeErr(err)
|
||||
return
|
||||
}
|
||||
for _, node := range snap.Nodes {
|
||||
event := NewEvent(&node.Node)
|
||||
if err := writeEvent(event); err != nil {
|
||||
writeErr(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, conn := range snap.Conns {
|
||||
event := NewEvent(&conn)
|
||||
if err := writeEvent(event); err != nil {
|
||||
writeErr(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-events:
|
||||
// only send message events which match the filters
|
||||
if event.Msg != nil && !filters.Match(event.Msg) {
|
||||
continue
|
||||
}
|
||||
if err := writeEvent(event); err != nil {
|
||||
writeErr(err)
|
||||
return
|
||||
}
|
||||
case <-clientGone:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewMsgFilters constructs a collection of message filters from a URL query
|
||||
// parameter.
|
||||
//
|
||||
// The parameter is expected to be a dash-separated list of individual filters,
|
||||
// each having the format '<proto>:<codes>', where <proto> is the name of a
|
||||
// protocol and <codes> is a comma-separated list of message codes.
|
||||
//
|
||||
// A message code of '*' or '-1' is considered a wildcard and matches any code.
|
||||
func NewMsgFilters(filterParam string) (MsgFilters, error) {
|
||||
filters := make(MsgFilters)
|
||||
for _, filter := range strings.Split(filterParam, "-") {
|
||||
protoCodes := strings.SplitN(filter, ":", 2)
|
||||
if len(protoCodes) != 2 || protoCodes[0] == "" || protoCodes[1] == "" {
|
||||
return nil, fmt.Errorf("invalid message filter: %s", filter)
|
||||
}
|
||||
proto := protoCodes[0]
|
||||
for _, code := range strings.Split(protoCodes[1], ",") {
|
||||
if code == "*" || code == "-1" {
|
||||
filters[MsgFilter{Proto: proto, Code: -1}] = struct{}{}
|
||||
continue
|
||||
}
|
||||
n, err := strconv.ParseUint(code, 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid message code: %s", code)
|
||||
}
|
||||
filters[MsgFilter{Proto: proto, Code: int64(n)}] = struct{}{}
|
||||
}
|
||||
}
|
||||
return filters, nil
|
||||
}
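
To illustrate the filter syntax described above (the protocol names and the filterExample helper are purely illustrative), a hedged in-package sketch of how such a parameter parses and matches:

```go
// filterExample demonstrates the filter syntax: eth message codes 0 and 1,
// plus a wildcard matching every shh message code.
func filterExample() {
	filters, err := NewMsgFilters("eth:0,1-shh:*")
	if err != nil {
		panic(err)
	}
	fmt.Println(filters.Match(&Msg{Protocol: "eth", Code: 1})) // true
	fmt.Println(filters.Match(&Msg{Protocol: "eth", Code: 2})) // false
	fmt.Println(filters.Match(&Msg{Protocol: "shh", Code: 7})) // true
}
```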
|
||||
|
||||
// MsgFilters is a collection of filters which are used to filter message
|
||||
// events
|
||||
type MsgFilters map[MsgFilter]struct{}
|
||||
|
||||
// Match checks if the given message matches any of the filters
|
||||
func (m MsgFilters) Match(msg *Msg) bool {
|
||||
// check if there is a wildcard filter for the message's protocol
|
||||
if _, ok := m[MsgFilter{Proto: msg.Protocol, Code: -1}]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
// check if there is a filter for the message's protocol and code
|
||||
if _, ok := m[MsgFilter{Proto: msg.Protocol, Code: int64(msg.Code)}]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// MsgFilter is used to filter message events based on protocol and message
|
||||
// code
|
||||
type MsgFilter struct {
|
||||
// Proto is matched against a message's protocol
|
||||
Proto string
|
||||
|
||||
// Code is matched against a message's code, with -1 matching all codes
|
||||
Code int64
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a network snapshot
|
||||
func (s *Server) CreateSnapshot(w http.ResponseWriter, req *http.Request) {
|
||||
snap, err := s.network.Snapshot()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
s.JSON(w, http.StatusOK, snap)
|
||||
}
|
||||
|
||||
// LoadSnapshot loads a snapshot into the network
|
||||
func (s *Server) LoadSnapshot(w http.ResponseWriter, req *http.Request) {
|
||||
snap := &Snapshot{}
|
||||
if err := json.NewDecoder(req.Body).Decode(snap); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.network.Load(snap); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
s.JSON(w, http.StatusOK, s.network)
|
||||
}
|
||||
|
||||
// CreateNode creates a node in the network using the given configuration
|
||||
func (s *Server) CreateNode(w http.ResponseWriter, req *http.Request) {
|
||||
config := adapters.RandomNodeConfig()
|
||||
err := json.NewDecoder(req.Body).Decode(config)
|
||||
if err != nil && err != io.EOF {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
node, err := s.network.NewNodeWithConfig(config)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
s.JSON(w, http.StatusCreated, node.NodeInfo())
|
||||
}
|
||||
|
||||
// GetNodes returns all nodes which exist in the network
|
||||
func (s *Server) GetNodes(w http.ResponseWriter, req *http.Request) {
|
||||
nodes := s.network.GetNodes()
|
||||
|
||||
infos := make([]*p2p.NodeInfo, len(nodes))
|
||||
for i, node := range nodes {
|
||||
infos[i] = node.NodeInfo()
|
||||
}
|
||||
|
||||
s.JSON(w, http.StatusOK, infos)
|
||||
}
|
||||
|
||||
// GetNode returns details of a node
|
||||
func (s *Server) GetNode(w http.ResponseWriter, req *http.Request) {
|
||||
node := req.Context().Value("node").(*Node)
|
||||
|
||||
s.JSON(w, http.StatusOK, node.NodeInfo())
|
||||
}
|
||||
|
||||
// StartNode starts a node
|
||||
func (s *Server) StartNode(w http.ResponseWriter, req *http.Request) {
|
||||
node := req.Context().Value("node").(*Node)
|
||||
|
||||
if err := s.network.Start(node.ID()); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
s.JSON(w, http.StatusOK, node.NodeInfo())
|
||||
}
|
||||
|
||||
// StopNode stops a node
|
||||
func (s *Server) StopNode(w http.ResponseWriter, req *http.Request) {
|
||||
node := req.Context().Value("node").(*Node)
|
||||
|
||||
if err := s.network.Stop(node.ID()); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
s.JSON(w, http.StatusOK, node.NodeInfo())
|
||||
}
|
||||
|
||||
// ConnectNode connects a node to a peer node
|
||||
func (s *Server) ConnectNode(w http.ResponseWriter, req *http.Request) {
|
||||
node := req.Context().Value("node").(*Node)
|
||||
peer := req.Context().Value("peer").(*Node)
|
||||
|
||||
if err := s.network.Connect(node.ID(), peer.ID()); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
s.JSON(w, http.StatusOK, node.NodeInfo())
|
||||
}
|
||||
|
||||
// DisconnectNode disconnects a node from a peer node
|
||||
func (s *Server) DisconnectNode(w http.ResponseWriter, req *http.Request) {
|
||||
node := req.Context().Value("node").(*Node)
|
||||
peer := req.Context().Value("peer").(*Node)
|
||||
|
||||
if err := s.network.Disconnect(node.ID(), peer.ID()); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
s.JSON(w, http.StatusOK, node.NodeInfo())
|
||||
}
|
||||
|
||||
// Options responds to the OPTIONS HTTP method by returning a 200 OK response
|
||||
// with the "Access-Control-Allow-Headers" header set to "Content-Type"
|
||||
func (s *Server) Options(w http.ResponseWriter, req *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
// NodeRPC forwards RPC requests to a node in the network via a WebSocket
|
||||
// connection
|
||||
func (s *Server) NodeRPC(w http.ResponseWriter, req *http.Request) {
|
||||
node := req.Context().Value("node").(*Node)
|
||||
|
||||
handler := func(conn *websocket.Conn) {
|
||||
node.ServeRPC(conn)
|
||||
}
|
||||
|
||||
websocket.Server{Handler: handler}.ServeHTTP(w, req)
|
||||
}
|
||||
|
||||
// ServeHTTP implements the http.Handler interface by delegating to the
|
||||
// underlying httprouter.Router
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
s.router.ServeHTTP(w, req)
|
||||
}
|
||||
|
||||
// GET registers a handler for GET requests to a particular path
|
||||
func (s *Server) GET(path string, handle http.HandlerFunc) {
|
||||
s.router.GET(path, s.wrapHandler(handle))
|
||||
}
|
||||
|
||||
// POST registers a handler for POST requests to a particular path
|
||||
func (s *Server) POST(path string, handle http.HandlerFunc) {
|
||||
s.router.POST(path, s.wrapHandler(handle))
|
||||
}
|
||||
|
||||
// DELETE registers a handler for DELETE requests to a particular path
|
||||
func (s *Server) DELETE(path string, handle http.HandlerFunc) {
|
||||
s.router.DELETE(path, s.wrapHandler(handle))
|
||||
}
|
||||
|
||||
// OPTIONS registers a handler for OPTIONS requests to a particular path
|
||||
func (s *Server) OPTIONS(path string, handle http.HandlerFunc) {
|
||||
s.router.OPTIONS("/*path", s.wrapHandler(handle))
|
||||
}
|
||||
|
||||
// JSON sends "data" as a JSON HTTP response
|
||||
func (s *Server) JSON(w http.ResponseWriter, status int, data interface{}) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(status)
|
||||
json.NewEncoder(w).Encode(data)
|
||||
}
|
||||
|
||||
// wrapHandler returns a httprouter.Handle which wraps an http.HandlerFunc by
|
||||
// populating request.Context with any objects from the URL params
|
||||
func (s *Server) wrapHandler(handler http.HandlerFunc) httprouter.Handle {
|
||||
return func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if id := params.ByName("nodeid"); id != "" {
|
||||
var node *Node
|
||||
if nodeID, err := discover.HexID(id); err == nil {
|
||||
node = s.network.GetNode(nodeID)
|
||||
} else {
|
||||
node = s.network.GetNodeByName(id)
|
||||
}
|
||||
if node == nil {
|
||||
http.NotFound(w, req)
|
||||
return
|
||||
}
|
||||
ctx = context.WithValue(ctx, "node", node)
|
||||
}
|
||||
|
||||
if id := params.ByName("peerid"); id != "" {
|
||||
var peer *Node
|
||||
if peerID, err := discover.HexID(id); err == nil {
|
||||
peer = s.network.GetNode(peerID)
|
||||
} else {
|
||||
peer = s.network.GetNodeByName(id)
|
||||
}
|
||||
if peer == nil {
|
||||
http.NotFound(w, req)
|
||||
return
|
||||
}
|
||||
ctx = context.WithValue(ctx, "peer", peer)
|
||||
}
|
||||
|
||||
handler(w, req.WithContext(ctx))
|
||||
}
|
||||
}
|
680
vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go
generated
vendored
Normal file
|
@ -0,0 +1,680 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
)
|
||||
|
||||
// NetworkConfig defines configuration options for starting a Network
|
||||
type NetworkConfig struct {
|
||||
ID string `json:"id"`
|
||||
DefaultService string `json:"default_service,omitempty"`
|
||||
}
|
||||
|
||||
// Network models a p2p simulation network which consists of a collection of
|
||||
// simulated nodes and the connections which exist between them.
|
||||
//
|
||||
// The Network has a single NodeAdapter which is responsible for actually
|
||||
// starting nodes and connecting them together.
|
||||
//
|
||||
// The Network emits events when nodes are started and stopped, when they are
|
||||
// connected and disconnected, and also when messages are sent between nodes.
|
||||
type Network struct {
|
||||
NetworkConfig
|
||||
|
||||
Nodes []*Node `json:"nodes"`
|
||||
nodeMap map[discover.NodeID]int
|
||||
|
||||
Conns []*Conn `json:"conns"`
|
||||
connMap map[string]int
|
||||
|
||||
nodeAdapter adapters.NodeAdapter
|
||||
events event.Feed
|
||||
lock sync.RWMutex
|
||||
quitc chan struct{}
|
||||
}
|
||||
|
||||
// NewNetwork returns a Network which uses the given NodeAdapter and NetworkConfig
|
||||
func NewNetwork(nodeAdapter adapters.NodeAdapter, conf *NetworkConfig) *Network {
|
||||
return &Network{
|
||||
NetworkConfig: *conf,
|
||||
nodeAdapter: nodeAdapter,
|
||||
nodeMap: make(map[discover.NodeID]int),
|
||||
connMap: make(map[string]int),
|
||||
quitc: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Events returns the output event feed of the Network.
|
||||
func (self *Network) Events() *event.Feed {
|
||||
return &self.events
|
||||
}
|
||||
|
||||
// NewNode adds a new node to the network with a random ID
|
||||
func (self *Network) NewNode() (*Node, error) {
|
||||
conf := adapters.RandomNodeConfig()
|
||||
conf.Services = []string{self.DefaultService}
|
||||
return self.NewNodeWithConfig(conf)
|
||||
}
|
||||
|
||||
// NewNodeWithConfig adds a new node to the network with the given config,
|
||||
// returning an error if a node with the same ID or name already exists
|
||||
func (self *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
|
||||
// create a random ID and PrivateKey if not set
|
||||
if conf.ID == (discover.NodeID{}) {
|
||||
c := adapters.RandomNodeConfig()
|
||||
conf.ID = c.ID
|
||||
conf.PrivateKey = c.PrivateKey
|
||||
}
|
||||
id := conf.ID
|
||||
|
||||
// assign a name to the node if not set
|
||||
if conf.Name == "" {
|
||||
conf.Name = fmt.Sprintf("node%02d", len(self.Nodes)+1)
|
||||
}
|
||||
|
||||
// check the node doesn't already exist
|
||||
if node := self.getNode(id); node != nil {
|
||||
return nil, fmt.Errorf("node with ID %q already exists", id)
|
||||
}
|
||||
if node := self.getNodeByName(conf.Name); node != nil {
|
||||
return nil, fmt.Errorf("node with name %q already exists", conf.Name)
|
||||
}
|
||||
|
||||
// if no services are configured, use the default service
|
||||
if len(conf.Services) == 0 {
|
||||
conf.Services = []string{self.DefaultService}
|
||||
}
|
||||
|
||||
// use the NodeAdapter to create the node
|
||||
adapterNode, err := self.nodeAdapter.NewNode(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
node := &Node{
|
||||
Node: adapterNode,
|
||||
Config: conf,
|
||||
}
|
||||
log.Trace(fmt.Sprintf("node %v created", id))
|
||||
self.nodeMap[id] = len(self.Nodes)
|
||||
self.Nodes = append(self.Nodes, node)
|
||||
|
||||
// emit a "control" event
|
||||
self.events.Send(ControlEvent(node))
|
||||
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// Config returns the network configuration
|
||||
func (self *Network) Config() *NetworkConfig {
|
||||
return &self.NetworkConfig
|
||||
}
|
||||
|
||||
// StartAll starts all nodes in the network
|
||||
func (self *Network) StartAll() error {
|
||||
for _, node := range self.Nodes {
|
||||
if node.Up {
|
||||
continue
|
||||
}
|
||||
if err := self.Start(node.ID()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopAll stops all nodes in the network
|
||||
func (self *Network) StopAll() error {
|
||||
for _, node := range self.Nodes {
|
||||
if !node.Up {
|
||||
continue
|
||||
}
|
||||
if err := self.Stop(node.ID()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start starts the node with the given ID
|
||||
func (self *Network) Start(id discover.NodeID) error {
|
||||
return self.startWithSnapshots(id, nil)
|
||||
}
|
||||
|
||||
// startWithSnapshots starts the node with the given ID using the given
|
||||
// snapshots
|
||||
func (self *Network) startWithSnapshots(id discover.NodeID, snapshots map[string][]byte) error {
|
||||
node := self.GetNode(id)
|
||||
if node == nil {
|
||||
return fmt.Errorf("node %v does not exist", id)
|
||||
}
|
||||
if node.Up {
|
||||
return fmt.Errorf("node %v already up", id)
|
||||
}
|
||||
log.Trace(fmt.Sprintf("starting node %v: %v using %v", id, node.Up, self.nodeAdapter.Name()))
|
||||
if err := node.Start(snapshots); err != nil {
|
||||
log.Warn(fmt.Sprintf("start up failed: %v", err))
|
||||
return err
|
||||
}
|
||||
node.Up = true
|
||||
log.Info(fmt.Sprintf("started node %v: %v", id, node.Up))
|
||||
|
||||
self.events.Send(NewEvent(node))
|
||||
|
||||
// subscribe to peer events
|
||||
client, err := node.Client()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting rpc client for node %v: %s", id, err)
|
||||
}
|
||||
events := make(chan *p2p.PeerEvent)
|
||||
sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting peer events for node %v: %s", id, err)
|
||||
}
|
||||
go self.watchPeerEvents(id, events, sub)
|
||||
return nil
|
||||
}
|
||||
|
||||
// watchPeerEvents reads peer events from the given channel and emits
|
||||
// corresponding network events
|
||||
func (self *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEvent, sub event.Subscription) {
|
||||
defer func() {
|
||||
sub.Unsubscribe()
|
||||
|
||||
// assume the node is now down
|
||||
self.lock.Lock()
|
||||
node := self.getNode(id)
|
||||
node.Up = false
|
||||
self.lock.Unlock()
|
||||
self.events.Send(NewEvent(node))
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-events:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
peer := event.Peer
|
||||
switch event.Type {
|
||||
|
||||
case p2p.PeerEventTypeAdd:
|
||||
self.DidConnect(id, peer)
|
||||
|
||||
case p2p.PeerEventTypeDrop:
|
||||
self.DidDisconnect(id, peer)
|
||||
|
||||
case p2p.PeerEventTypeMsgSend:
|
||||
self.DidSend(id, peer, event.Protocol, *event.MsgCode)
|
||||
|
||||
case p2p.PeerEventTypeMsgRecv:
|
||||
self.DidReceive(peer, id, event.Protocol, *event.MsgCode)
|
||||
|
||||
}
|
||||
|
||||
case err := <-sub.Err():
|
||||
if err != nil {
|
||||
log.Error(fmt.Sprintf("error getting peer events for node %v", id), "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the node with the given ID
|
||||
func (self *Network) Stop(id discover.NodeID) error {
|
||||
node := self.GetNode(id)
|
||||
if node == nil {
|
||||
return fmt.Errorf("node %v does not exist", id)
|
||||
}
|
||||
if !node.Up {
|
||||
return fmt.Errorf("node %v already down", id)
|
||||
}
|
||||
if err := node.Stop(); err != nil {
|
||||
return err
|
||||
}
|
||||
node.Up = false
|
||||
log.Info(fmt.Sprintf("stop node %v: %v", id, node.Up))
|
||||
|
||||
self.events.Send(ControlEvent(node))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Connect connects two nodes together by calling the "admin_addPeer" RPC
|
||||
// method on the "one" node so that it connects to the "other" node
|
||||
func (self *Network) Connect(oneID, otherID discover.NodeID) error {
|
||||
log.Debug(fmt.Sprintf("connecting %s to %s", oneID, otherID))
|
||||
conn, err := self.GetOrCreateConn(oneID, otherID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if conn.Up {
|
||||
return fmt.Errorf("%v and %v already connected", oneID, otherID)
|
||||
}
|
||||
if err := conn.nodesUp(); err != nil {
|
||||
return err
|
||||
}
|
||||
client, err := conn.one.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.events.Send(ControlEvent(conn))
|
||||
return client.Call(nil, "admin_addPeer", string(conn.other.Addr()))
|
||||
}

// Disconnect disconnects two nodes by calling the "admin_removePeer" RPC
// method on the "one" node so that it disconnects from the "other" node
func (self *Network) Disconnect(oneID, otherID discover.NodeID) error {
    conn := self.GetConn(oneID, otherID)
    if conn == nil {
        return fmt.Errorf("connection between %v and %v does not exist", oneID, otherID)
    }
    if !conn.Up {
        return fmt.Errorf("%v and %v already disconnected", oneID, otherID)
    }
    client, err := conn.one.Client()
    if err != nil {
        return err
    }
    self.events.Send(ControlEvent(conn))
    return client.Call(nil, "admin_removePeer", string(conn.other.Addr()))
}
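
// A hedged usage sketch, not part of the upstream diff: Connect and Disconnect
// are typically paired by test drivers. `net`, `a` and `b` are assumed to come
// from the surrounding test and to refer to nodes that are already running.
func exampleRelink(net *Network, a, b discover.NodeID) error {
    if err := net.Connect(a, b); err != nil {
        return err
    }
    // ... exchange messages over the new connection ...
    return net.Disconnect(a, b)
}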

// DidConnect tracks the fact that the "one" node connected to the "other" node
func (self *Network) DidConnect(one, other discover.NodeID) error {
    conn, err := self.GetOrCreateConn(one, other)
    if err != nil {
        return fmt.Errorf("connection between %v and %v does not exist", one, other)
    }
    if conn.Up {
        return fmt.Errorf("%v and %v already connected", one, other)
    }
    conn.Up = true
    self.events.Send(NewEvent(conn))
    return nil
}

// DidDisconnect tracks the fact that the "one" node disconnected from the
// "other" node
func (self *Network) DidDisconnect(one, other discover.NodeID) error {
    conn, err := self.GetOrCreateConn(one, other)
    if err != nil {
        return fmt.Errorf("connection between %v and %v does not exist", one, other)
    }
    if !conn.Up {
        return fmt.Errorf("%v and %v already disconnected", one, other)
    }
    conn.Up = false
    self.events.Send(NewEvent(conn))
    return nil
}

// DidSend tracks the fact that "sender" sent a message to "receiver"
func (self *Network) DidSend(sender, receiver discover.NodeID, proto string, code uint64) error {
    msg := &Msg{
        One:      sender,
        Other:    receiver,
        Protocol: proto,
        Code:     code,
        Received: false,
    }
    self.events.Send(NewEvent(msg))
    return nil
}

// DidReceive tracks the fact that "receiver" received a message from "sender"
func (self *Network) DidReceive(sender, receiver discover.NodeID, proto string, code uint64) error {
    msg := &Msg{
        One:      sender,
        Other:    receiver,
        Protocol: proto,
        Code:     code,
        Received: true,
    }
    self.events.Send(NewEvent(msg))
    return nil
}

// GetNode gets the node with the given ID, returning nil if the node does not
// exist
func (self *Network) GetNode(id discover.NodeID) *Node {
    self.lock.Lock()
    defer self.lock.Unlock()
    return self.getNode(id)
}

// GetNodeByName gets the node with the given name, returning nil if the node
// does not exist
func (self *Network) GetNodeByName(name string) *Node {
    self.lock.Lock()
    defer self.lock.Unlock()
    return self.getNodeByName(name)
}

func (self *Network) getNode(id discover.NodeID) *Node {
    i, found := self.nodeMap[id]
    if !found {
        return nil
    }
    return self.Nodes[i]
}

func (self *Network) getNodeByName(name string) *Node {
    for _, node := range self.Nodes {
        if node.Config.Name == name {
            return node
        }
    }
    return nil
}

// GetNodes returns the existing nodes
func (self *Network) GetNodes() []*Node {
    self.lock.Lock()
    defer self.lock.Unlock()
    return self.Nodes
}

// GetConn returns the connection which exists between "one" and "other"
// regardless of which node initiated the connection
func (self *Network) GetConn(oneID, otherID discover.NodeID) *Conn {
    self.lock.Lock()
    defer self.lock.Unlock()
    return self.getConn(oneID, otherID)
}

// GetOrCreateConn is like GetConn but creates the connection if it doesn't
// already exist
func (self *Network) GetOrCreateConn(oneID, otherID discover.NodeID) (*Conn, error) {
    self.lock.Lock()
    defer self.lock.Unlock()
    if conn := self.getConn(oneID, otherID); conn != nil {
        return conn, nil
    }

    one := self.getNode(oneID)
    if one == nil {
        return nil, fmt.Errorf("node %v does not exist", oneID)
    }
    other := self.getNode(otherID)
    if other == nil {
        return nil, fmt.Errorf("node %v does not exist", otherID)
    }
    conn := &Conn{
        One:   oneID,
        Other: otherID,
        one:   one,
        other: other,
    }
    label := ConnLabel(oneID, otherID)
    self.connMap[label] = len(self.Conns)
    self.Conns = append(self.Conns, conn)
    return conn, nil
}

func (self *Network) getConn(oneID, otherID discover.NodeID) *Conn {
    label := ConnLabel(oneID, otherID)
    i, found := self.connMap[label]
    if !found {
        return nil
    }
    return self.Conns[i]
}

// Shutdown stops all nodes in the network and closes the quit channel
func (self *Network) Shutdown() {
    for _, node := range self.Nodes {
        log.Debug(fmt.Sprintf("stopping node %s", node.ID().TerminalString()))
        if err := node.Stop(); err != nil {
            log.Warn(fmt.Sprintf("error stopping node %s", node.ID().TerminalString()), "err", err)
        }
    }
    close(self.quitc)
}

// Node is a wrapper around adapters.Node which is used to track the status
// of a node in the network
type Node struct {
    adapters.Node `json:"-"`

    // Config is the config used to create the node
    Config *adapters.NodeConfig `json:"config"`

    // Up tracks whether or not the node is running
    Up bool `json:"up"`
}

// ID returns the ID of the node
func (self *Node) ID() discover.NodeID {
    return self.Config.ID
}

// String returns a log-friendly string
func (self *Node) String() string {
    return fmt.Sprintf("Node %v", self.ID().TerminalString())
}

// NodeInfo returns information about the node
func (self *Node) NodeInfo() *p2p.NodeInfo {
    // avoid a panic if the node is not started yet
    if self.Node == nil {
        return nil
    }
    info := self.Node.NodeInfo()
    info.Name = self.Config.Name
    return info
}

// MarshalJSON implements the json.Marshaler interface so that the encoded
// JSON includes the NodeInfo
func (self *Node) MarshalJSON() ([]byte, error) {
    return json.Marshal(struct {
        Info   *p2p.NodeInfo        `json:"info,omitempty"`
        Config *adapters.NodeConfig `json:"config,omitempty"`
        Up     bool                 `json:"up"`
    }{
        Info:   self.NodeInfo(),
        Config: self.Config,
        Up:     self.Up,
    })
}

// Conn represents a connection between two nodes in the network
type Conn struct {
    // One is the node which initiated the connection
    One discover.NodeID `json:"one"`

    // Other is the node which the connection was made to
    Other discover.NodeID `json:"other"`

    // Up tracks whether or not the connection is active
    Up bool `json:"up"`

    one   *Node
    other *Node
}

// nodesUp returns an error if either of the connection's nodes is not up
func (self *Conn) nodesUp() error {
    if !self.one.Up {
        return fmt.Errorf("one %v is not up", self.One)
    }
    if !self.other.Up {
        return fmt.Errorf("other %v is not up", self.Other)
    }
    return nil
}

// String returns a log-friendly string
func (self *Conn) String() string {
    return fmt.Sprintf("Conn %v->%v", self.One.TerminalString(), self.Other.TerminalString())
}

// Msg represents a p2p message sent between two nodes in the network
type Msg struct {
    One      discover.NodeID `json:"one"`
    Other    discover.NodeID `json:"other"`
    Protocol string          `json:"protocol"`
    Code     uint64          `json:"code"`
    Received bool            `json:"received"`
}

// String returns a log-friendly string
func (self *Msg) String() string {
    return fmt.Sprintf("Msg(%d) %v->%v", self.Code, self.One.TerminalString(), self.Other.TerminalString())
}

// ConnLabel generates a deterministic string which represents a connection
// between two nodes, used to compare if two connections are between the same
// nodes
func ConnLabel(source, target discover.NodeID) string {
    var first, second discover.NodeID
    if bytes.Compare(source.Bytes(), target.Bytes()) > 0 {
        first = target
        second = source
    } else {
        first = source
        second = target
    }
    return fmt.Sprintf("%v-%v", first, second)
}
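
// A hedged illustration, not part of the upstream diff: because ConnLabel
// orders its arguments, both argument orders map onto the same connMap entry.
// `a` and `b` are arbitrary node IDs assumed from the caller.
func exampleConnLabelSymmetry(a, b discover.NodeID) bool {
    return ConnLabel(a, b) == ConnLabel(b, a) // always true
}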

// Snapshot represents the state of a network at a single point in time and can
// be used to restore the state of a network
type Snapshot struct {
    Nodes []NodeSnapshot `json:"nodes,omitempty"`
    Conns []Conn         `json:"conns,omitempty"`
}

// NodeSnapshot represents the state of a node in the network
type NodeSnapshot struct {
    Node Node `json:"node,omitempty"`

    // Snapshots is arbitrary data gathered from calling node.Snapshots()
    Snapshots map[string][]byte `json:"snapshots,omitempty"`
}

// Snapshot creates a network snapshot
func (self *Network) Snapshot() (*Snapshot, error) {
    self.lock.Lock()
    defer self.lock.Unlock()
    snap := &Snapshot{
        Nodes: make([]NodeSnapshot, len(self.Nodes)),
        Conns: make([]Conn, len(self.Conns)),
    }
    for i, node := range self.Nodes {
        snap.Nodes[i] = NodeSnapshot{Node: *node}
        if !node.Up {
            continue
        }
        snapshots, err := node.Snapshots()
        if err != nil {
            return nil, err
        }
        snap.Nodes[i].Snapshots = snapshots
    }
    for i, conn := range self.Conns {
        snap.Conns[i] = *conn
    }
    return snap, nil
}

// Load loads a network snapshot
func (self *Network) Load(snap *Snapshot) error {
    for _, n := range snap.Nodes {
        if _, err := self.NewNodeWithConfig(n.Node.Config); err != nil {
            return err
        }
        if !n.Node.Up {
            continue
        }
        if err := self.startWithSnapshots(n.Node.Config.ID, n.Snapshots); err != nil {
            return err
        }
    }
    for _, conn := range snap.Conns {
        if err := self.Connect(conn.One, conn.Other); err != nil {
            return err
        }
    }
    return nil
}
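
// A hedged sketch, not part of the upstream diff: round-trip a network through
// its JSON snapshot and replay it into a second network. `oldNet` and `newNet`
// are assumed to have been created with the same adapter and node services.
func exampleSnapshotRoundTrip(oldNet, newNet *Network) error {
    snap, err := oldNet.Snapshot()
    if err != nil {
        return err
    }
    data, err := json.Marshal(snap) // Snapshot and NodeSnapshot carry JSON tags for this purpose
    if err != nil {
        return err
    }
    var restored Snapshot
    if err := json.Unmarshal(data, &restored); err != nil {
        return err
    }
    return newNet.Load(&restored)
}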

// Subscribe reads control events from a channel and executes them
func (self *Network) Subscribe(events chan *Event) {
    for {
        select {
        case event, ok := <-events:
            if !ok {
                return
            }
            if event.Control {
                self.executeControlEvent(event)
            }
        case <-self.quitc:
            return
        }
    }
}

func (self *Network) executeControlEvent(event *Event) {
    log.Trace("execute control event", "type", event.Type, "event", event)
    switch event.Type {
    case EventTypeNode:
        if err := self.executeNodeEvent(event); err != nil {
            log.Error("error executing node event", "event", event, "err", err)
        }
    case EventTypeConn:
        if err := self.executeConnEvent(event); err != nil {
            log.Error("error executing conn event", "event", event, "err", err)
        }
    case EventTypeMsg:
        log.Warn("ignoring control msg event")
    }
}

func (self *Network) executeNodeEvent(e *Event) error {
    if !e.Node.Up {
        return self.Stop(e.Node.ID())
    }

    if _, err := self.NewNodeWithConfig(e.Node.Config); err != nil {
        return err
    }
    return self.Start(e.Node.ID())
}

func (self *Network) executeConnEvent(e *Event) error {
    if e.Conn.Up {
        return self.Connect(e.Conn.One, e.Conn.Other)
    } else {
        return self.Disconnect(e.Conn.One, e.Conn.Other)
    }
}
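
// A hedged sketch, not part of the upstream diff: drivers can steer the network
// through control events instead of calling Connect directly. The payload's Up
// field expresses the desired state, which executeConnEvent acts on. This
// assumes ControlEvent wraps a *Conn into an EventTypeConn control event, as
// the loop above expects; the loop keeps running until the network quits.
func exampleControlConnect(net *Network, a, b discover.NodeID) {
    ctrl := make(chan *Event, 1)
    go net.Subscribe(ctrl)
    ctrl <- ControlEvent(&Conn{One: a, Other: b, Up: true}) // request a connection
}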
157
vendor/github.com/ethereum/go-ethereum/p2p/simulations/simulation.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulations

import (
    "context"
    "time"

    "github.com/ethereum/go-ethereum/p2p/discover"
)

// Simulation provides a framework for running actions in a simulated network
// and then waiting for expectations to be met
type Simulation struct {
    network *Network
}

// NewSimulation returns a new simulation which runs in the given network
func NewSimulation(network *Network) *Simulation {
    return &Simulation{
        network: network,
    }
}

// Run performs a step of the simulation by performing the step's action and
// then waiting for the step's expectation to be met
func (s *Simulation) Run(ctx context.Context, step *Step) (result *StepResult) {
    result = newStepResult()

    result.StartedAt = time.Now()
    defer func() { result.FinishedAt = time.Now() }()

    // watch network events for the duration of the step
    stop := s.watchNetwork(result)
    defer stop()

    // perform the action
    if err := step.Action(ctx); err != nil {
        result.Error = err
        return
    }

    // wait for all node expectations to either pass, error or timeout
    nodes := make(map[discover.NodeID]struct{}, len(step.Expect.Nodes))
    for _, id := range step.Expect.Nodes {
        nodes[id] = struct{}{}
    }
    for len(result.Passes) < len(nodes) {
        select {
        case id := <-step.Trigger:
            // skip if we aren't checking the node
            if _, ok := nodes[id]; !ok {
                continue
            }

            // skip if the node has already passed
            if _, ok := result.Passes[id]; ok {
                continue
            }

            // run the node expectation check
            pass, err := step.Expect.Check(ctx, id)
            if err != nil {
                result.Error = err
                return
            }
            if pass {
                result.Passes[id] = time.Now()
            }
        case <-ctx.Done():
            result.Error = ctx.Err()
            return
        }
    }

    return
}

func (s *Simulation) watchNetwork(result *StepResult) func() {
    stop := make(chan struct{})
    done := make(chan struct{})
    events := make(chan *Event)
    sub := s.network.Events().Subscribe(events)
    go func() {
        defer close(done)
        defer sub.Unsubscribe()
        for {
            select {
            case event := <-events:
                result.NetworkEvents = append(result.NetworkEvents, event)
            case <-stop:
                return
            }
        }
    }()
    return func() {
        close(stop)
        <-done
    }
}

type Step struct {
    // Action is the action to perform for this step
    Action func(context.Context) error

    // Trigger is a channel which receives node ids and triggers an
    // expectation check for that node
    Trigger chan discover.NodeID

    // Expect is the expectation to wait for when performing this step
    Expect *Expectation
}

type Expectation struct {
    // Nodes is a list of nodes to check
    Nodes []discover.NodeID

    // Check checks whether a given node meets the expectation
    Check func(context.Context, discover.NodeID) (bool, error)
}

func newStepResult() *StepResult {
    return &StepResult{
        Passes: make(map[discover.NodeID]time.Time),
    }
}

type StepResult struct {
    // Error is the error encountered whilst running the step
    Error error

    // StartedAt is the time the step started
    StartedAt time.Time

    // FinishedAt is the time the step finished
    FinishedAt time.Time

    // Passes are the timestamps of the successful node expectations
    Passes map[discover.NodeID]time.Time

    // NetworkEvents are the network events which occurred during the step
    NetworkEvents []*Event
}
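
// A hedged sketch, not part of the upstream diff: one way a test might drive
// Simulation.Run. The action fires a trigger per node and the expectation
// passes every node unconditionally; real tests would connect nodes in the
// action and inspect protocol state in Check.
func exampleRunStep(net *Network, ids []discover.NodeID) error {
    trigger := make(chan discover.NodeID, len(ids))
    step := &Step{
        Action: func(context.Context) error {
            for _, id := range ids {
                trigger <- id
            }
            return nil
        },
        Trigger: trigger,
        Expect: &Expectation{
            Nodes: ids,
            Check: func(ctx context.Context, id discover.NodeID) (bool, error) {
                return true, nil
            },
        },
    }
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    return NewSimulation(net).Run(ctx, step).Error
}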

@ -18,7 +18,6 @@ package params

import (
    "fmt"
    "math"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
@ -40,7 +39,7 @@ var (
        EIP150Hash:     common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"),
        EIP155Block:    big.NewInt(2675000),
        EIP158Block:    big.NewInt(2675000),
        ByzantiumBlock: big.NewInt(math.MaxInt64), // Don't enable yet
        ByzantiumBlock: big.NewInt(4370000),

        Ethash: new(EthashConfig),
    }

@ -70,7 +69,7 @@ var (
        EIP150Hash:     common.HexToHash("0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9"),
        EIP155Block:    big.NewInt(3),
        EIP158Block:    big.NewInt(3),
        ByzantiumBlock: big.NewInt(math.MaxInt64), // Don't enable yet
        ByzantiumBlock: big.NewInt(1035301),

        Clique: &CliqueConfig{
            Period: 15,
@ -100,17 +99,18 @@ type ChainConfig struct {
    ChainId *big.Int `json:"chainId"` // Chain id identifies the current chain and is used for replay protection

    HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead)

    DAOForkBlock   *big.Int `json:"daoForkBlock,omitempty"`   // TheDAO hard-fork switch block (nil = no fork)
    DAOForkSupport bool     `json:"daoForkSupport,omitempty"` // Whether the nodes supports or opposes the DAO hard-fork

    // EIP150 implements the Gas price changes (https://github.com/ethereum/EIPs/issues/150)
    EIP150Block *big.Int    `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork)
    EIP150Hash  common.Hash `json:"eip150Hash,omitempty"`  // EIP150 HF hash (fast sync aid)
    EIP150Hash  common.Hash `json:"eip150Hash,omitempty"`  // EIP150 HF hash (needed for header only clients as only gas pricing changed)

    EIP155Block *big.Int `json:"eip155Block,omitempty"` // EIP155 HF block
    EIP158Block *big.Int `json:"eip158Block,omitempty"` // EIP158 HF block

    ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = alraedy on homestead)
    ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium)

    // Various consensus engines
    Ethash *EthashConfig `json:"ethash,omitempty"`
@ -197,7 +197,7 @@ func (c *ChainConfig) GasTable(num *big.Int) GasTable {
    case c.IsEIP158(num):
        return GasTableEIP158
    case c.IsEIP150(num):
        return GasTableHomesteadGasRepriceFork
        return GasTableEIP150
    default:
        return GasTableHomestead
    }
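
// A hedged sketch, not part of the upstream diff: with Byzantium now scheduled
// at block 4,370,000 on mainnet, fork-dependent behaviour can be derived from
// the chain config. Assumes the exported MainnetChainConfig plus the
// IsByzantium and GasTable helpers in this package.
func exampleMainnetByzantium() bool {
    num := big.NewInt(4370000)
    _ = MainnetChainConfig.GasTable(num)       // GasTableEIP158 applies at this height
    return MainnetChainConfig.IsByzantium(num) // true from this block onwards
}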

@ -49,9 +49,7 @@ var (

    // GasTableHomestead contain the gas re-prices for
    // the homestead phase.
    //
    // TODO rename to GasTableEIP150
    GasTableHomesteadGasRepriceFork = GasTable{
    GasTableEIP150 = GasTable{
        ExtcodeSize: 700,
        ExtcodeCopy: 700,
        Balance:     400,

@ -23,7 +23,7 @@ import (
const (
    VersionMajor = 1        // Major version component of the current release
    VersionMinor = 7        // Minor version component of the current release
    VersionPatch = 0        // Patch version component of the current release
    VersionPatch = 2        // Patch version component of the current release
    VersionMeta  = "stable" // Version metadata to append to the version string
)

@ -349,85 +349,49 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error {
    return err
}

// ShhSubscribe calls the "shh_subscribe" method with the given arguments,
// registering a subscription. Server notifications for the subscription are
// sent to the given channel. The element type of the channel must match the
// expected type of content returned by the subscription.
//
// The context argument cancels the RPC request that sets up the subscription but has no
// effect on the subscription after ShhSubscribe has returned.
//
// Slow subscribers will be dropped eventually. Client buffers up to 8000 notifications
// before considering the subscriber dead. The subscription Err channel will receive
// ErrSubscriptionQueueOverflow. Use a sufficiently large buffer on the channel or ensure
// that the channel usually has at least one reader to prevent this issue.
func (c *Client) ShhSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*ClientSubscription, error) {
    // Check type of channel first.
    chanVal := reflect.ValueOf(channel)
    if chanVal.Kind() != reflect.Chan || chanVal.Type().ChanDir()&reflect.SendDir == 0 {
        panic("first argument to ShhSubscribe must be a writable channel")
    }
    if chanVal.IsNil() {
        panic("channel given to ShhSubscribe must not be nil")
    }
    if c.isHTTP {
        return nil, ErrNotificationsUnsupported
    }

    msg, err := c.newMessage("shh"+subscribeMethodSuffix, args...)
    if err != nil {
        return nil, err
    }
    op := &requestOp{
        ids:  []json.RawMessage{msg.ID},
        resp: make(chan *jsonrpcMessage),
        sub:  newClientSubscription(c, "shh", chanVal),
    }

    // Send the subscription request.
    // The arrival and validity of the response is signaled on sub.quit.
    if err := c.send(ctx, op, msg); err != nil {
        return nil, err
    }
    if _, err := op.wait(ctx); err != nil {
        return nil, err
    }
    return op.sub, nil
// EthSubscribe registers a subscription under the "eth" namespace.
func (c *Client) EthSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*ClientSubscription, error) {
    return c.Subscribe(ctx, "eth", channel, args...)
}

// EthSubscribe calls the "eth_subscribe" method with the given arguments,
// ShhSubscribe registers a subscription under the "shh" namespace.
func (c *Client) ShhSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*ClientSubscription, error) {
    return c.Subscribe(ctx, "shh", channel, args...)
}

// Subscribe calls the "<namespace>_subscribe" method with the given arguments,
// registering a subscription. Server notifications for the subscription are
// sent to the given channel. The element type of the channel must match the
// expected type of content returned by the subscription.
//
// The context argument cancels the RPC request that sets up the subscription but has no
// effect on the subscription after EthSubscribe has returned.
// effect on the subscription after Subscribe has returned.
//
// Slow subscribers will be dropped eventually. Client buffers up to 8000 notifications
// before considering the subscriber dead. The subscription Err channel will receive
// ErrSubscriptionQueueOverflow. Use a sufficiently large buffer on the channel or ensure
// that the channel usually has at least one reader to prevent this issue.
func (c *Client) EthSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*ClientSubscription, error) {
func (c *Client) Subscribe(ctx context.Context, namespace string, channel interface{}, args ...interface{}) (*ClientSubscription, error) {
    // Check type of channel first.
    chanVal := reflect.ValueOf(channel)
    if chanVal.Kind() != reflect.Chan || chanVal.Type().ChanDir()&reflect.SendDir == 0 {
        panic("first argument to EthSubscribe must be a writable channel")
        panic("first argument to Subscribe must be a writable channel")
    }
    if chanVal.IsNil() {
        panic("channel given to EthSubscribe must not be nil")
        panic("channel given to Subscribe must not be nil")
    }
    if c.isHTTP {
        return nil, ErrNotificationsUnsupported
    }

    msg, err := c.newMessage("eth"+subscribeMethodSuffix, args...)
    msg, err := c.newMessage(namespace+subscribeMethodSuffix, args...)
    if err != nil {
        return nil, err
    }
    op := &requestOp{
        ids:  []json.RawMessage{msg.ID},
        resp: make(chan *jsonrpcMessage),
        sub:  newClientSubscription(c, "eth", chanVal),
        sub:  newClientSubscription(c, namespace, chanVal),
    }

    // Send the subscription request.
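
// A hedged usage sketch from a client's perspective, not part of the upstream
// diff: with the generic Subscribe, callers can subscribe in any namespace,
// while EthSubscribe and ShhSubscribe remain thin wrappers. The websocket URL
// and the "newHeads" subscription name below are illustrative only.
//
//	client, err := rpc.Dial("ws://127.0.0.1:8546")
//	if err != nil {
//		return err
//	}
//	heads := make(chan map[string]interface{}, 16)
//	sub, err := client.Subscribe(context.Background(), "eth", heads, "newHeads")
//	if err != nil {
//		return err
//	}
//	defer sub.Unsubscribe()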
Some files were not shown because too many files have changed in this diff