Upgrade to geth 1.7.3 and add geth patches (#492)

This commit is contained in:
Ivan Daniluk 2018-01-15 21:26:41 +01:00 committed by GitHub
parent 953790c641
commit f93cd81d83
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
170 changed files with 10437 additions and 3163 deletions

View File

@ -14,7 +14,7 @@ It's written in Go and requires Go 1.8 or above.
It uses Makefile to do most common actions. See `make help` output for available commands. It uses Makefile to do most common actions. See `make help` output for available commands.
status-go uses [forked ethereum-go](https://github.com/status-im/go-ethereum) with [some changes](https://github.com/status-im/go-ethereum/wiki/Rebase-Geth-1.7.0) in it, located under [`vendor/` dir](https://github.com/status-im/status-go/tree/develop/vendor/github.com/ethereum/go-ethereum). status-go uses [forked ethereum-go](https://github.com/status-im/go-ethereum) with [some patches applied](https://github.com/status-im/status-go/tree/develop/geth-patches) in it, located under [`vendor/` dir](https://github.com/status-im/status-go/tree/develop/vendor/github.com/ethereum/go-ethereum). See [geth-patches README](https://github.com/status-im/status-go/tree/develop/geth-patches) for more info.
# Build # Build
There are two main modes status-go can be built: There are two main modes status-go can be built:

View File

@ -0,0 +1,352 @@
diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go
index 211fa863d..65c83f3b0 100644
--- a/accounts/keystore/key.go
+++ b/accounts/keystore/key.go
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/pborman/uuid"
+ "github.com/status-im/status-go/extkeys"
)
const (
@@ -46,6 +47,10 @@ type Key struct {
// we only store privkey as pubkey/address can be derived from it
// privkey in this struct is always in plaintext
PrivateKey *ecdsa.PrivateKey
+ // extended key is the root node for new hardened children i.e. sub-accounts
+ ExtendedKey *extkeys.ExtendedKey
+ // next index to be used for sub-account child derivation
+ SubAccountIndex uint32
}
type keyStore interface {
@@ -65,10 +70,12 @@ type plainKeyJSON struct {
}
type encryptedKeyJSONV3 struct {
- Address string `json:"address"`
- Crypto cryptoJSON `json:"crypto"`
- Id string `json:"id"`
- Version int `json:"version"`
+ Address string `json:"address"`
+ Crypto cryptoJSON `json:"crypto"`
+ Id string `json:"id"`
+ Version int `json:"version"`
+ ExtendedKey cryptoJSON `json:"extendedkey"`
+ SubAccountIndex uint32 `json:"subaccountindex"`
}
type encryptedKeyJSONV1 struct {
@@ -137,6 +144,40 @@ func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key {
return key
}
+func newKeyFromExtendedKey(extKey *extkeys.ExtendedKey) (*Key, error) {
+ var (
+ extChild1, extChild2 *extkeys.ExtendedKey
+ err error
+ )
+
+ if extKey.Depth == 0 { // we are dealing with master key
+ // CKD#1 - main account
+ extChild1, err = extKey.BIP44Child(extkeys.CoinTypeETH, 0)
+ if err != nil {
+ return &Key{}, err
+ }
+
+ // CKD#2 - sub-accounts root
+ extChild2, err = extKey.BIP44Child(extkeys.CoinTypeETH, 1)
+ if err != nil {
+ return &Key{}, err
+ }
+ } else { // we are dealing with non-master key, so it is safe to persist and extend from it
+ extChild1 = extKey
+ extChild2 = extKey
+ }
+
+ privateKeyECDSA := extChild1.ToECDSA()
+ id := uuid.NewRandom()
+ key := &Key{
+ Id: id,
+ Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey),
+ PrivateKey: privateKeyECDSA,
+ ExtendedKey: extChild2,
+ }
+ return key, nil
+}
+
// NewKeyForDirectICAP generates a key whose address fits into < 155 bits so it can fit
// into the Direct ICAP spec. for simplicity and easier compatibility with other libs, we
// retry until the first byte is 0.
diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go
index 80ccd3741..750608145 100644
--- a/accounts/keystore/keystore.go
+++ b/accounts/keystore/keystore.go
@@ -38,6 +38,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
+ "github.com/status-im/status-go/extkeys"
)
var (
@@ -228,6 +229,11 @@ func (ks *KeyStore) Accounts() []accounts.Account {
return ks.cache.accounts()
}
+// AccountDecryptedKey returns decrypted key for account (provided that password is correct).
+func (ks *KeyStore) AccountDecryptedKey(a accounts.Account, auth string) (accounts.Account, *Key, error) {
+ return ks.getDecryptedKey(a, auth)
+}
+
// Delete deletes the key matched by account if the passphrase is correct.
// If the account contains no filename, the address must match a unique key.
func (ks *KeyStore) Delete(a accounts.Account, passphrase string) error {
@@ -453,6 +459,34 @@ func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (acco
return ks.importKey(key, passphrase)
}
+// ImportExtendedKey stores ECDSA key (obtained from extended key) along with CKD#2 (root for sub-accounts)
+// If key file is not found, it is created. Key is encrypted with the given passphrase.
+func (ks *KeyStore) ImportExtendedKey(extKey *extkeys.ExtendedKey, passphrase string) (accounts.Account, error) {
+ key, err := newKeyFromExtendedKey(extKey)
+ if err != nil {
+ zeroKey(key.PrivateKey)
+ return accounts.Account{}, err
+ }
+
+ // if account is already imported, return cached version
+ if ks.cache.hasAddress(key.Address) {
+ a := accounts.Account{
+ Address: key.Address,
+ }
+ ks.cache.maybeReload()
+ ks.cache.mu.Lock()
+ a, err := ks.cache.find(a)
+ ks.cache.mu.Unlock()
+ if err != nil {
+ zeroKey(key.PrivateKey)
+ return a, err
+ }
+ return a, nil
+ }
+
+ return ks.importKey(key, passphrase)
+}
+
func (ks *KeyStore) importKey(key *Key, passphrase string) (accounts.Account, error) {
a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.storage.JoinPath(keyFileName(key.Address))}}
if err := ks.storage.StoreKey(a.URL.Path, key, passphrase); err != nil {
@@ -463,6 +497,15 @@ func (ks *KeyStore) importKey(key *Key, passphrase string) (accounts.Account, er
return a, nil
}
+func (ks *KeyStore) IncSubAccountIndex(a accounts.Account, passphrase string) error {
+ a, key, err := ks.getDecryptedKey(a, passphrase)
+ if err != nil {
+ return err
+ }
+ key.SubAccountIndex++
+ return ks.storage.StoreKey(a.URL.Path, key, passphrase)
+}
+
// Update changes the passphrase of an existing account.
func (ks *KeyStore) Update(a accounts.Account, passphrase, newPassphrase string) error {
a, key, err := ks.getDecryptedKey(a, passphrase)
@@ -486,6 +529,9 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account
// zeroKey zeroes a private key in memory.
func zeroKey(k *ecdsa.PrivateKey) {
+ if k == nil {
+ return
+ }
b := k.D.Bits()
for i := range b {
b[i] = 0
diff --git a/accounts/keystore/keystore_passphrase.go b/accounts/keystore/keystore_passphrase.go
index eaec39f7d..902b213e2 100644
--- a/accounts/keystore/keystore_passphrase.go
+++ b/accounts/keystore/keystore_passphrase.go
@@ -41,6 +41,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/randentropy"
"github.com/pborman/uuid"
+ "github.com/status-im/status-go/extkeys"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
)
@@ -151,15 +152,62 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
KDFParams: scryptParamsJSON,
MAC: hex.EncodeToString(mac),
}
+ encryptedExtendedKey, err := EncryptExtendedKey(key.ExtendedKey, auth, scryptN, scryptP)
+ if err != nil {
+ return nil, err
+ }
encryptedKeyJSONV3 := encryptedKeyJSONV3{
hex.EncodeToString(key.Address[:]),
cryptoStruct,
key.Id.String(),
version,
+ encryptedExtendedKey,
+ key.SubAccountIndex,
}
return json.Marshal(encryptedKeyJSONV3)
}
+func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (cryptoJSON, error) {
+ if extKey == nil {
+ return cryptoJSON{}, nil
+ }
+ authArray := []byte(auth)
+ salt := randentropy.GetEntropyCSPRNG(32)
+ derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
+ if err != nil {
+ return cryptoJSON{}, err
+ }
+ encryptKey := derivedKey[:16]
+ keyBytes := []byte(extKey.String())
+
+ iv := randentropy.GetEntropyCSPRNG(aes.BlockSize) // 16
+ cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
+ if err != nil {
+ return cryptoJSON{}, err
+ }
+ mac := crypto.Keccak256(derivedKey[16:32], cipherText)
+
+ scryptParamsJSON := make(map[string]interface{}, 5)
+ scryptParamsJSON["n"] = scryptN
+ scryptParamsJSON["r"] = scryptR
+ scryptParamsJSON["p"] = scryptP
+ scryptParamsJSON["dklen"] = scryptDKLen
+ scryptParamsJSON["salt"] = hex.EncodeToString(salt)
+
+ cipherParamsJSON := cipherparamsJSON{
+ IV: hex.EncodeToString(iv),
+ }
+
+ return cryptoJSON{
+ Cipher: "aes-128-ctr",
+ CipherText: hex.EncodeToString(cipherText),
+ CipherParams: cipherParamsJSON,
+ KDF: "scrypt",
+ KDFParams: scryptParamsJSON,
+ MAC: hex.EncodeToString(mac),
+ }, nil
+}
+
// DecryptKey decrypts a key from a json blob, returning the private key itself.
func DecryptKey(keyjson []byte, auth string) (*Key, error) {
// Parse the json into a simple map to fetch the key version
@@ -171,20 +219,43 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
var (
keyBytes, keyId []byte
err error
+ extKeyBytes []byte
+ extKey *extkeys.ExtendedKey
)
+
+ subAccountIndex, ok := m["subaccountindex"].(float64)
+ if !ok {
+ subAccountIndex = 0
+ }
+
if version, ok := m["version"].(string); ok && version == "1" {
k := new(encryptedKeyJSONV1)
if err := json.Unmarshal(keyjson, k); err != nil {
return nil, err
}
keyBytes, keyId, err = decryptKeyV1(k, auth)
+ if err != nil {
+ return nil, err
+ }
+
+ extKey, err = extkeys.NewKeyFromString(extkeys.EmptyExtendedKeyString)
} else {
k := new(encryptedKeyJSONV3)
if err := json.Unmarshal(keyjson, k); err != nil {
return nil, err
}
keyBytes, keyId, err = decryptKeyV3(k, auth)
+ if err != nil {
+ return nil, err
+ }
+
+ extKeyBytes, err = decryptExtendedKey(k, auth)
+ if err != nil {
+ return nil, err
+ }
+ extKey, err = extkeys.NewKeyFromString(string(extKeyBytes))
}
+
// Handle any decryption errors and return the key
if err != nil {
return nil, err
@@ -192,9 +263,11 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
key := crypto.ToECDSAUnsafe(keyBytes)
return &Key{
- Id: uuid.UUID(keyId),
- Address: crypto.PubkeyToAddress(key.PublicKey),
- PrivateKey: key,
+ Id: uuid.UUID(keyId),
+ Address: crypto.PubkeyToAddress(key.PublicKey),
+ PrivateKey: key,
+ ExtendedKey: extKey,
+ SubAccountIndex: uint32(subAccountIndex),
}, nil
}
@@ -274,6 +347,51 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt
return plainText, keyId, err
}
+func decryptExtendedKey(keyProtected *encryptedKeyJSONV3, auth string) (plainText []byte, err error) {
+ if len(keyProtected.ExtendedKey.CipherText) == 0 {
+ return []byte(extkeys.EmptyExtendedKeyString), nil
+ }
+
+ if keyProtected.Version != version {
+ return nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
+ }
+
+ if keyProtected.ExtendedKey.Cipher != "aes-128-ctr" {
+ return nil, fmt.Errorf("Cipher not supported: %v", keyProtected.ExtendedKey.Cipher)
+ }
+
+ mac, err := hex.DecodeString(keyProtected.ExtendedKey.MAC)
+ if err != nil {
+ return nil, err
+ }
+
+ iv, err := hex.DecodeString(keyProtected.ExtendedKey.CipherParams.IV)
+ if err != nil {
+ return nil, err
+ }
+
+ cipherText, err := hex.DecodeString(keyProtected.ExtendedKey.CipherText)
+ if err != nil {
+ return nil, err
+ }
+
+ derivedKey, err := getKDFKey(keyProtected.ExtendedKey, auth)
+ if err != nil {
+ return nil, err
+ }
+
+ calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
+ if !bytes.Equal(calculatedMAC, mac) {
+ return nil, ErrDecrypt
+ }
+
+ plainText, err = aesCTRXOR(derivedKey[:16], cipherText, iv)
+ if err != nil {
+ return nil, err
+ }
+ return plainText, err
+}
+
func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
authArray := []byte(auth)
salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string))

View File

@ -0,0 +1,381 @@
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 91f392f94..471275de5 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -32,14 +32,20 @@ import (
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
// EthApiBackend implements ethapi.Backend for full nodes
type EthApiBackend struct {
- eth *Ethereum
- gpo *gasprice.Oracle
+ eth *Ethereum
+ gpo *gasprice.Oracle
+ statusBackend *ethapi.StatusBackend
+}
+
+func (b *EthApiBackend) GetStatusBackend() *ethapi.StatusBackend {
+ return b.statusBackend
}
func (b *EthApiBackend) ChainConfig() *params.ChainConfig {
diff --git a/eth/backend.go b/eth/backend.go
index 1cd9e8fff..2fbdec4e2 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -169,7 +169,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine)
eth.miner.SetExtra(makeExtraData(config.ExtraData))
- eth.ApiBackend = &EthApiBackend{eth, nil}
+ eth.ApiBackend = &EthApiBackend{eth, nil, nil}
gpoParams := config.GPO
if gpoParams.Default == nil {
gpoParams.Default = config.GasPrice
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 025f42617..d8f48a890 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -178,15 +178,24 @@ func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string {
// It offers only methods that can retrieve accounts.
type PublicAccountAPI struct {
am *accounts.Manager
+ b Backend
}
// NewPublicAccountAPI creates a new PublicAccountAPI.
-func NewPublicAccountAPI(am *accounts.Manager) *PublicAccountAPI {
- return &PublicAccountAPI{am: am}
+func NewPublicAccountAPI(b Backend) *PublicAccountAPI {
+ return &PublicAccountAPI{
+ am: b.AccountManager(),
+ b: b,
+ }
}
// Accounts returns the collection of accounts this node manages
func (s *PublicAccountAPI) Accounts() []common.Address {
+ backend := s.b.GetStatusBackend()
+ if backend != nil {
+ return backend.am.Accounts()
+ }
+
addresses := make([]common.Address, 0) // return [] instead of nil if empty
for _, wallet := range s.am.Wallets() {
for _, account := range wallet.Accounts() {
@@ -216,6 +225,11 @@ func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI {
// ListAccounts will return a list of addresses for accounts this node manages.
func (s *PrivateAccountAPI) ListAccounts() []common.Address {
+ backend := s.b.GetStatusBackend()
+ if backend != nil {
+ return backend.am.Accounts()
+ }
+
addresses := make([]common.Address, 0) // return [] instead of nil if empty
for _, wallet := range s.am.Wallets() {
for _, account := range wallet.Accounts() {
@@ -1122,10 +1136,46 @@ func submitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
return tx.Hash(), nil
}
-// SendTransaction creates a transaction for the given argument, sign it and submit it to the
+// SendTransactionWithPassphrase creates a transaction by unpacking queued transaction, signs it and submits to the
// transaction pool.
-func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args SendTxArgs) (common.Hash, error) {
+// @Status
+func (s *PublicTransactionPoolAPI) SendTransactionWithPassphrase(ctx context.Context, args SendTxArgs, passphrase string) (common.Hash, error) {
+ // Look up the wallet containing the requested signer
+ account := accounts.Account{Address: args.From}
+
+ wallet, err := s.b.AccountManager().Find(account)
+ if err != nil {
+ return common.Hash{}, err
+ }
+
+ if args.Nonce == nil {
+ // Hold the address's mutex around signing to prevent concurrent assignment of
+ // the same nonce to multiple accounts.
+ s.nonceLock.LockAddr(args.From)
+ defer s.nonceLock.UnlockAddr(args.From)
+ }
+ // Set some sanity defaults and terminate on failure
+ if err := args.setDefaults(ctx, s.b); err != nil {
+ return common.Hash{}, err
+ }
+ // Assemble the transaction and sign with the wallet
+ tx := args.toTransaction()
+
+ var chainID *big.Int
+ if config := s.b.ChainConfig(); config.IsEIP155(s.b.CurrentBlock().Number()) {
+ chainID = config.ChainId
+ }
+ signed, err := wallet.SignTxWithPassphrase(account, passphrase, tx, chainID)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ return submitTransaction(ctx, s.b, signed)
+}
+
+// SendTransaction creates a transaction by unpacking queued transaction, signs it and submits to the
+// transaction pool.
+func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args SendTxArgs) (common.Hash, error) {
// Look up the wallet containing the requested signer
account := accounts.Account{Address: args.From}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 368fa4872..cac58dfc0 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -68,6 +68,8 @@ type Backend interface {
ChainConfig() *params.ChainConfig
CurrentBlock() *types.Block
+
+ GetStatusBackend() *StatusBackend
}
func GetAPIs(apiBackend Backend) []rpc.API {
@@ -105,7 +107,7 @@ func GetAPIs(apiBackend Backend) []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
- Service: NewPublicAccountAPI(apiBackend.AccountManager()),
+ Service: NewPublicAccountAPI(apiBackend),
Public: true,
}, {
Namespace: "personal",
diff --git a/internal/ethapi/status_backend.go b/internal/ethapi/status_backend.go
new file mode 100644
index 000000000..c4e553cae
--- /dev/null
+++ b/internal/ethapi/status_backend.go
@@ -0,0 +1,88 @@
+package ethapi
+
+import (
+ "context"
+ "errors"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/les/status"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// StatusBackend exposes Ethereum internals to support custom semantics in status-go bindings
+type StatusBackend struct {
+ eapi *PublicEthereumAPI // Wrapper around the Ethereum object to access metadata
+ bcapi *PublicBlockChainAPI // Wrapper around the blockchain to access chain data
+ txapi *PublicTransactionPoolAPI // Wrapper around the transaction pool to access transaction data
+
+ am *status.AccountManager
+}
+
+var (
+ ErrStatusBackendNotInited = errors.New("StatusIM backend is not properly inited")
+)
+
+// NewStatusBackend creates a new backend using an existing Ethereum object.
+func NewStatusBackend(apiBackend Backend) *StatusBackend {
+ log.Info("StatusIM: backend service inited")
+ return &StatusBackend{
+ eapi: NewPublicEthereumAPI(apiBackend),
+ bcapi: NewPublicBlockChainAPI(apiBackend),
+ txapi: NewPublicTransactionPoolAPI(apiBackend, new(AddrLocker)),
+ am: status.NewAccountManager(apiBackend.AccountManager()),
+ }
+}
+
+// SetAccountsFilterHandler sets a callback that is triggered when account list is requested
+func (b *StatusBackend) SetAccountsFilterHandler(fn status.AccountsFilterHandler) {
+ b.am.SetAccountsFilterHandler(fn)
+}
+
+// AccountManager returns reference to account manager
+func (b *StatusBackend) AccountManager() *status.AccountManager {
+ return b.am
+}
+
+// SendTransaction wraps call to PublicTransactionPoolAPI.SendTransactionWithPassphrase
+func (b *StatusBackend) SendTransaction(ctx context.Context, args status.SendTxArgs, passphrase string) (common.Hash, error) {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ if estimatedGas, err := b.EstimateGas(ctx, args); err == nil {
+ if estimatedGas.ToInt().Cmp(big.NewInt(defaultGas)) == 1 { // gas > defaultGas
+ args.Gas = estimatedGas
+ }
+ }
+
+ return b.txapi.SendTransactionWithPassphrase(ctx, SendTxArgs(args), passphrase)
+}
+
+// EstimateGas uses underlying blockchain API to obtain gas for a given tx arguments
+func (b *StatusBackend) EstimateGas(ctx context.Context, args status.SendTxArgs) (*hexutil.Big, error) {
+ if args.Gas != nil {
+ return args.Gas, nil
+ }
+
+ var gasPrice hexutil.Big
+ if args.GasPrice != nil {
+ gasPrice = *args.GasPrice
+ }
+
+ var value hexutil.Big
+ if args.Value != nil {
+ value = *args.Value
+ }
+
+ callArgs := CallArgs{
+ From: args.From,
+ To: args.To,
+ GasPrice: gasPrice,
+ Value: value,
+ Data: args.Data,
+ }
+
+ return b.bcapi.EstimateGas(ctx, callArgs)
+}
diff --git a/les/api_backend.go b/les/api_backend.go
index 56f617a7d..f839f24e6 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -32,14 +32,20 @@ import (
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
type LesApiBackend struct {
- eth *LightEthereum
- gpo *gasprice.Oracle
+ eth *LightEthereum
+ gpo *gasprice.Oracle
+ statusBackend *ethapi.StatusBackend
+}
+
+func (b *LesApiBackend) GetStatusBackend() *ethapi.StatusBackend {
+ return b.statusBackend
}
func (b *LesApiBackend) ChainConfig() *params.ChainConfig {
diff --git a/les/backend.go b/les/backend.go
index 333df920e..7d8cf3916 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -75,6 +75,8 @@ type LightEthereum struct {
netRPCService *ethapi.PublicNetAPI
wg sync.WaitGroup
+
+ StatusBackend *ethapi.StatusBackend
}
func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
@@ -126,12 +128,17 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, ClientProtocolVersions, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, quitSync, &leth.wg); err != nil {
return nil, err
}
- leth.ApiBackend = &LesApiBackend{leth, nil}
+ leth.ApiBackend = &LesApiBackend{leth, nil, nil}
gpoParams := config.GPO
if gpoParams.Default == nil {
gpoParams.Default = config.GasPrice
}
leth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams)
+
+ // inject status-im backend
+ leth.ApiBackend.statusBackend = ethapi.NewStatusBackend(leth.ApiBackend)
+ leth.StatusBackend = leth.ApiBackend.statusBackend // alias
+
return leth, nil
}
diff --git a/les/status/accounts.go b/les/status/accounts.go
new file mode 100644
index 000000000..78bd2ad92
--- /dev/null
+++ b/les/status/accounts.go
@@ -0,0 +1,45 @@
+package status
+
+import (
+ "github.com/ethereum/go-ethereum/accounts"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// AccountManager abstracts both internal account manager and extra filter status backend requires
+type AccountManager struct {
+ am *accounts.Manager
+ accountsFilterHandler AccountsFilterHandler
+}
+
+// NewAccountManager creates a new AccountManager
+func NewAccountManager(am *accounts.Manager) *AccountManager {
+ return &AccountManager{
+ am: am,
+ }
+}
+
+// AccountsFilterHandler function to filter out accounts list
+type AccountsFilterHandler func([]common.Address) []common.Address
+
+// Accounts returns accounts' addresses of currently logged in user.
+// Since status supports HD keys, the following list is returned:
+// [addressCKD#1, addressCKD#2->Child1, addressCKD#2->Child2, .. addressCKD#2->ChildN]
+func (d *AccountManager) Accounts() []common.Address {
+ var addresses []common.Address
+ for _, wallet := range d.am.Wallets() {
+ for _, account := range wallet.Accounts() {
+ addresses = append(addresses, account.Address)
+ }
+ }
+
+ if d.accountsFilterHandler != nil {
+ return d.accountsFilterHandler(addresses)
+ }
+
+ return addresses
+}
+
+// SetAccountsFilterHandler sets filtering function for accounts list
+func (d *AccountManager) SetAccountsFilterHandler(fn AccountsFilterHandler) {
+ d.accountsFilterHandler = fn
+}
diff --git a/les/status/types.go b/les/status/types.go
new file mode 100644
index 000000000..04437bdb6
--- /dev/null
+++ b/les/status/types.go
@@ -0,0 +1,17 @@
+package status
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+// SendTxArgs represents the arguments to submit a new transaction into the transaction pool.
+type SendTxArgs struct {
+ From common.Address `json:"from"`
+ To *common.Address `json:"to"`
+ Gas *hexutil.Big `json:"gas"`
+ GasPrice *hexutil.Big `json:"gasPrice"`
+ Value *hexutil.Big `json:"value"`
+ Data hexutil.Bytes `json:"data"`
+ Nonce *hexutil.Uint64 `json:"nonce"`
+}

View File

@ -0,0 +1,70 @@
diff --git a/containers/docker/status-alpine/geth/Dockerfile b/containers/docker/status-alpine/geth/Dockerfile
--- a/containers/docker/status-alpine/geth/Dockerfile 1970-01-01 01:00:00.000000000 +0100
+++ b/containers/docker/status-alpine/geth/Dockerfile 2017-09-19 17:03:51.000000000 +0200
@@ -0,0 +1,20 @@
+FROM alpine:3.5
+
+RUN \
+ apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
+
+ # clone status-go
+ mkdir -p /usr/lib/go/src/github.com/status-im && \
+ git clone --depth 1 --branch 0.9.7 https://github.com/status-im/status-go.git /usr/lib/go/src/github.com/status-im/status-go && \
+
+ # clone go-ethereum (and install everything)
+ git clone --depth 1 --branch status/1.6.1-stable https://github.com/status-im/go-ethereum && \
+ (cd go-ethereum && make geth) && \
+ cp go-ethereum/build/bin/geth /geth && \
+ apk del go git make gcc musl-dev linux-headers && \
+ rm -rf /go-ethereum && rm -rf /var/cache/apk/*
+
+EXPOSE 8545
+EXPOSE 30303
+
+ENTRYPOINT ["/geth"]
diff --git a/containers/docker/status-alpine/swarm/Dockerfile b/containers/docker/status-alpine/swarm/Dockerfile
--- a/containers/docker/status-alpine/swarm/Dockerfile 1970-01-01 01:00:00.000000000 +0100
+++ b/containers/docker/status-alpine/swarm/Dockerfile 2017-09-19 17:03:51.000000000 +0200
@@ -0,0 +1,19 @@
+FROM alpine:3.5
+
+RUN \
+ apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
+
+ # clone status-go
+ mkdir -p /usr/lib/go/src/github.com/status-im && \
+ git clone --depth 1 --branch develop https://github.com/status-im/status-go.git /usr/lib/go/src/github.com/status-im/status-go && \
+
+ # clone go-ethereum (and install everything)
+ git clone --depth 1 --branch status/1.6.1-stable https://github.com/status-im/go-ethereum && \
+ (cd go-ethereum && build/env.sh go run build/ci.go install ./cmd/swarm) && \
+ cp go-ethereum/build/bin/swarm /swarm && \
+ apk del go git make gcc musl-dev linux-headers && \
+ rm -rf /go-ethereum && rm -rf /var/cache/apk/*
+
+EXPOSE 30399
+
+ENTRYPOINT ["/swarm"]
diff --git a/containers/docker/status-alpine/wnode/Dockerfile b/containers/docker/status-alpine/wnode/Dockerfile
--- a/containers/docker/status-alpine/wnode/Dockerfile 1970-01-01 01:00:00.000000000 +0100
+++ b/containers/docker/status-alpine/wnode/Dockerfile 2017-09-19 17:03:51.000000000 +0200
@@ -0,0 +1,19 @@
+FROM alpine:3.5
+
+RUN \
+ apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
+
+ # clone status-go
+ mkdir -p /usr/lib/go/src/github.com/status-im && \
+ git clone --depth 1 --branch develop https://github.com/status-im/status-go.git /usr/lib/go/src/github.com/status-im/status-go && \
+
+ # clone go-ethereum (and install everything)
+ git clone --depth 1 --branch status/1.6.1-stable https://github.com/status-im/go-ethereum && \
+ (cd go-ethereum && build/env.sh go run build/ci.go install ./cmd/wnode) && \
+ cp go-ethereum/build/bin/wnode /wnode && \
+ apk del go git make gcc musl-dev linux-headers && \
+ rm -rf /go-ethereum && rm -rf /var/cache/apk/*
+
+EXPOSE 30379
+
+ENTRYPOINT ["/wnode"]

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,405 @@
diff --git a/common/message/message.go b/common/message/message.go
new file mode 100644
index 00000000..c9f8965c
--- /dev/null
+++ b/common/message/message.go
@@ -0,0 +1,65 @@
+package message
+
+// Direction defines an int type to indicate a message as either incoming or outgoing.
+type Direction int
+
+// consts of all message direction values.
+const (
+ IncomingMessage Direction = iota + 1
+ OutgoingMessage
+)
+
+// String returns the representation of giving direction.
+func (d Direction) String() string {
+ switch d {
+ case IncomingMessage:
+ return "IncomingMessage"
+ case OutgoingMessage:
+ return "OutgoingMessage"
+ }
+
+ return "MessageDirectionUnknown"
+}
+
+// Status defines a int type to indicate different status value of a
+// message state.
+type Status int
+
+// consts of all message delivery status.
+const (
+ PendingStatus Status = iota + 1
+ QueuedStatus
+ CachedStatus
+ SentStatus
+ ExpiredStatus
+ ProcessingStatus
+ ResentStatus
+ RejectedStatus
+ DeliveredStatus
+)
+
+// String returns the representation of giving state.
+func (s Status) String() string {
+ switch s {
+ case PendingStatus:
+ return "Pending"
+ case QueuedStatus:
+ return "Queued"
+ case CachedStatus:
+ return "Cached"
+ case SentStatus:
+ return "Sent"
+ case ProcessingStatus:
+ return "Processing"
+ case ExpiredStatus:
+ return "ExpiredTTL"
+ case ResentStatus:
+ return "Resent"
+ case RejectedStatus:
+ return "Rejected"
+ case DeliveredStatus:
+ return "Delivered"
+ }
+
+ return "unknown"
+}
diff --git a/whisper/whisperv5/api.go b/whisper/whisperv5/api.go
index e3c2f4a9..75ef8b66 100644
--- a/whisper/whisperv5/api.go
+++ b/whisper/whisperv5/api.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/common/message"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
@@ -238,13 +239,17 @@ type newMessageOverride struct {
// Post a message on the Whisper network.
func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, error) {
var (
- symKeyGiven = len(req.SymKeyID) > 0
- pubKeyGiven = len(req.PublicKey) > 0
- err error
+ symKeyGiven = len(req.SymKeyID) > 0
+ pubKeyGiven = len(req.PublicKey) > 0
+ isP2PMessage = len(req.TargetPeer) > 0
+ err error
)
+ api.w.traceOutgoingDelivery(isP2PMessage, message.PendingStatus, &req, nil, nil, nil)
+
// user must specify either a symmetric or an asymmetric key
if (symKeyGiven && pubKeyGiven) || (!symKeyGiven && !pubKeyGiven) {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, nil, nil, ErrSymAsym)
return false, ErrSymAsym
}
@@ -260,6 +265,7 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
// Set key that is used to sign the message
if len(req.Sig) > 0 {
if params.Src, err = api.w.GetPrivateKey(req.Sig); err != nil {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, nil, nil, err)
return false, err
}
}
@@ -267,12 +273,15 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
// Set symmetric key that is used to encrypt the message
if symKeyGiven {
if params.Topic == (TopicType{}) { // topics are mandatory with symmetric encryption
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, nil, nil, ErrNoTopics)
return false, ErrNoTopics
}
if params.KeySym, err = api.w.GetSymKey(req.SymKeyID); err != nil {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, nil, nil, err)
return false, err
}
if !validateSymmetricKey(params.KeySym) {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, nil, nil, ErrInvalidSymmetricKey)
return false, ErrInvalidSymmetricKey
}
}
@@ -281,6 +290,7 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
if pubKeyGiven {
params.Dst = crypto.ToECDSAPub(req.PublicKey)
if !ValidatePublicKey(params.Dst) {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, nil, nil, ErrInvalidPublicKey)
return false, ErrInvalidPublicKey
}
}
@@ -288,11 +298,13 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
// encrypt and sent message
whisperMsg, err := NewSentMessage(params)
if err != nil {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, nil, nil, err)
return false, err
}
env, err := whisperMsg.Wrap(params)
if err != nil {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, nil, nil, err)
return false, err
}
@@ -300,16 +312,28 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
if len(req.TargetPeer) > 0 {
n, err := discover.ParseNode(req.TargetPeer)
if err != nil {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, env, nil, err)
return false, fmt.Errorf("failed to parse target peer: %s", err)
}
- return true, api.w.SendP2PMessage(n.ID[:], env)
+
+ api.w.traceOutgoingDelivery(isP2PMessage, message.SentStatus, &req, env, nil, nil)
+
+ if err := api.w.SendP2PMessage(n.ID[:], env); err != nil {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, env, nil, err)
+ return true, err
+ }
+
+ api.w.traceOutgoingDelivery(isP2PMessage, message.DeliveredStatus, &req, env, nil, err)
+ return true, nil
}
// ensure that the message PoW meets the node's minimum accepted PoW
if req.PowTarget < api.w.MinPow() {
+ api.w.traceOutgoingDelivery(isP2PMessage, message.RejectedStatus, &req, env, nil, ErrTooLowPoW)
return false, ErrTooLowPoW
}
+ api.w.traceOutgoingDelivery(isP2PMessage, message.SentStatus, &req, env, nil, nil)
return true, api.w.Send(env)
}
diff --git a/whisper/whisperv5/doc.go b/whisper/whisperv5/doc.go
index a6c9e610..b3bc9963 100644
--- a/whisper/whisperv5/doc.go
+++ b/whisper/whisperv5/doc.go
@@ -33,6 +33,7 @@ import (
"fmt"
"time"
+ "github.com/ethereum/go-ethereum/common/message"
"github.com/ethereum/go-ethereum/p2p"
)
@@ -99,3 +100,23 @@ type NotificationServer interface {
// Stop stops notification sending loop, releasing related resources
Stop() error
}
+
+// MessageState holds the current delivery status of a whisper message (p2p or RPC).
+type MessageState struct {
+ IsP2P bool `json:"is_p2p"`
+ Reason error `json:"reason"`
+ Envelope Envelope `json:"envelope"`
+ Timestamp time.Time `json:"timestamp"`
+ Source NewMessage `json:"source"`
+ Status message.Status `json:"status"`
+ Direction message.Direction `json:"direction"`
+ Received ReceivedMessage `json:"received"`
+}
+
+// DeliveryServer represents a small message status
+// notification system where a message delivery status
+// update event is delivered to its underlying system
+// for both rpc messages and p2p messages.
+type DeliveryServer interface {
+ SendState(MessageState)
+}
diff --git a/whisper/whisperv5/filter.go b/whisper/whisperv5/filter.go
index b5e893e0..71dc3b2d 100644
--- a/whisper/whisperv5/filter.go
+++ b/whisper/whisperv5/filter.go
@@ -18,10 +18,12 @@ package whisperv5
import (
"crypto/ecdsa"
+ "errors"
"fmt"
"sync"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/message"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
)
@@ -115,15 +117,20 @@ func (fs *Filters) NotifyWatchers(env *Envelope, p2pMessage bool) {
if match {
msg = env.Open(watcher)
if msg == nil {
+ err := errors.New("Envelope failed to be opened")
+ fs.whisper.traceIncomingDelivery(p2pMessage, message.RejectedStatus, nil, env, nil, err)
log.Trace("processing message: failed to open", "message", env.Hash().Hex(), "filter", i)
}
} else {
+ err := errors.New("processing message: does not match")
+ fs.whisper.traceIncomingDelivery(p2pMessage, message.RejectedStatus, nil, env, nil, err)
log.Trace("processing message: does not match", "message", env.Hash().Hex(), "filter", i)
}
}
if match && msg != nil {
log.Trace("processing message: decrypted", "hash", env.Hash().Hex())
+ fs.whisper.traceIncomingDelivery(p2pMessage, message.DeliveredStatus, nil, env, msg, nil)
if watcher.Src == nil || IsPubKeyEqual(msg.Src, watcher.Src) {
watcher.Trigger(msg)
}
diff --git a/whisper/whisperv5/whisper.go b/whisper/whisperv5/whisper.go
index d1ef2445..58d0fd6e 100644
--- a/whisper/whisperv5/whisper.go
+++ b/whisper/whisperv5/whisper.go
@@ -27,6 +27,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/message"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
@@ -77,7 +78,8 @@ type Whisper struct {
statsMu sync.Mutex // guard stats
stats Statistics // Statistics of whisper node
- mailServer MailServer // MailServer interface
+ mailServer MailServer // MailServer interface
+ deliveryServer DeliveryServer // DeliveryServer interface
notificationServer NotificationServer
}
@@ -157,6 +159,11 @@ func (w *Whisper) RegisterServer(server MailServer) {
w.mailServer = server
}
+// RegisterDeliveryServer registers delivery server with Whisper
+func (w *Whisper) RegisterDeliveryServer(server DeliveryServer) {
+ w.deliveryServer = server
+}
+
// RegisterNotificationServer registers notification server with Whisper
func (w *Whisper) RegisterNotificationServer(server NotificationServer) {
w.notificationServer = server
@@ -620,8 +627,11 @@ func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
var envelope Envelope
if err := packet.Decode(&envelope); err != nil {
log.Warn("failed to decode direct message, peer will be disconnected", "peer", p.peer.ID(), "err", err)
+ wh.traceIncomingDelivery(true, message.RejectedStatus, nil, &envelope, nil, err)
return errors.New("invalid direct message")
}
+
+ wh.traceIncomingDelivery(true, message.SentStatus, nil, &envelope, nil, nil)
wh.postEvent(&envelope, true)
}
case p2pRequestCode:
@@ -630,6 +640,7 @@ func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
var request Envelope
if err := packet.Decode(&request); err != nil {
log.Warn("failed to decode p2p request message, peer will be disconnected", "peer", p.peer.ID(), "err", err)
+ wh.traceIncomingDelivery(true, message.RejectedStatus, nil, &request, nil, err)
return errors.New("invalid p2p request")
}
wh.mailServer.DeliverMail(p, &request)
@@ -700,16 +711,22 @@ func (wh *Whisper) add(envelope *Envelope) (bool, error) {
if !wh.expirations[envelope.Expiry].Has(hash) {
wh.expirations[envelope.Expiry].Add(hash)
}
+
+ wh.traceIncomingDelivery(false, message.CachedStatus, nil, envelope, nil, nil)
}
wh.poolMu.Unlock()
if alreadyCached {
log.Trace("whisper envelope already cached", "hash", envelope.Hash().Hex())
+ wh.traceIncomingDelivery(false, message.ResentStatus, nil, envelope, nil, nil)
} else {
log.Trace("cached whisper envelope", "hash", envelope.Hash().Hex())
wh.statsMu.Lock()
wh.stats.memoryUsed += envelope.size()
wh.statsMu.Unlock()
+
+ wh.traceIncomingDelivery(false, message.QueuedStatus, nil, envelope, nil, nil)
+
wh.postEvent(envelope, false) // notify the local node about the new message
if wh.mailServer != nil {
wh.mailServer.Archive(envelope)
@@ -718,6 +735,47 @@ func (wh *Whisper) add(envelope *Envelope) (bool, error) {
return true, nil
}
+func (w *Whisper) traceIncomingDelivery(isP2P bool, status message.Status, src *NewMessage, env *Envelope, rec *ReceivedMessage, err error) {
+ w.traceDelivery(isP2P, message.IncomingMessage, status, src, env, rec, err)
+}
+
+func (w *Whisper) traceOutgoingDelivery(isP2P bool, status message.Status, src *NewMessage, env *Envelope, rec *ReceivedMessage, err error) {
+ w.traceDelivery(isP2P, message.OutgoingMessage, status, src, env, rec, err)
+}
+
+func (w *Whisper) traceDelivery(isP2P bool, dir message.Direction, status message.Status, newmsg *NewMessage, envelope *Envelope, received *ReceivedMessage, err error) {
+ if w.deliveryServer == nil {
+ return
+ }
+
+ var env Envelope
+ var rec ReceivedMessage
+ var src NewMessage
+
+ if newmsg != nil {
+ src = *newmsg
+ }
+
+ if envelope != nil {
+ env = *envelope
+ }
+
+ if received != nil {
+ rec = *received
+ }
+
+ go w.deliveryServer.SendState(MessageState{
+ Reason: err,
+ Source: src,
+ Received: rec,
+ IsP2P: isP2P,
+ Status: status,
+ Envelope: env,
+ Direction: dir,
+ Timestamp: time.Now(),
+ })
+}
+
// postEvent queues the message for further processing.
func (w *Whisper) postEvent(envelope *Envelope, isP2P bool) {
// if the version of incoming message is higher than
@@ -730,6 +788,13 @@ func (w *Whisper) postEvent(envelope *Envelope, isP2P bool) {
w.checkOverflow()
w.messageQueue <- envelope
}
+
+ return
+ }
+
+ if w.deliveryServer != nil {
+ err := fmt.Errorf("Mismatch Envelope version(%d) to wanted Version(%d)", envelope.Ver(), EnvelopeVersion)
+ w.traceIncomingDelivery(isP2P, message.RejectedStatus, nil, envelope, nil, err)
}
}
@@ -759,9 +824,11 @@ func (w *Whisper) processQueue() {
return
case e = <-w.messageQueue:
+ w.traceIncomingDelivery(false, message.ProcessingStatus, nil, e, nil, nil)
w.filters.NotifyWatchers(e, false)
case e = <-w.p2pMsgQueue:
+ w.traceIncomingDelivery(true, message.ProcessingStatus, nil, e, nil, nil)
w.filters.NotifyWatchers(e, true)
}
}

View File

@ -0,0 +1,26 @@
diff --git a/light/postprocess.go b/light/postprocess.go
index e7e51388..dc6562be 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -66,12 +66,20 @@ var (
chtRoot: common.HexToHash("6f56dc61936752cc1f8c84b4addabdbe6a1c19693de3f21cb818362df2117f03"),
bloomTrieRoot: common.HexToHash("aca7d7c504d22737242effc3fdc604a762a0af9ced898036b5986c3a15220208"),
}
+
+ statusRopstenCheckpoint = trustedCheckpoint{
+ name: "Ropsten testnet",
+ sectionIdx: 67,
+ sectionHead: common.HexToHash("9832cf2ce760d4e3a7922fbfedeaa5dce67f1772e0f729f67c806bfafdedc370"),
+ chtRoot: common.HexToHash("60d43984a1d55e93f4296f4b48bf5af350476fe48679a73263bd57d8a324c9d4"),
+ bloomTrieRoot: common.HexToHash("fd81543dc619f6d1148e766b942c90296343c2cd0fd464946678f27f35feb59b"),
+ }
)
// trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to
var trustedCheckpoints = map[common.Hash]trustedCheckpoint{
params.MainnetGenesisHash: mainnetCheckpoint,
- params.TestnetGenesisHash: ropstenCheckpoint,
+ params.TestnetGenesisHash: statusRopstenCheckpoint,
}
var (

View File

@ -0,0 +1,13 @@
diff --git a/README.md b/README.md
index 61e36afec..5f1423276 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,8 @@
+# Go Ethereum (Status fork)
+
+This is a forked version of the official `go-ethereum` repository. For detailed information on patches applied, see [https://github.com/status-im/status-go/geth-patches/](https://github.com/status-im/status-go/geth-patches/).
+
+# Original README
## Go Ethereum
Official golang implementation of the Ethereum protocol.

84
geth-patches/README.md Normal file
View File

@ -0,0 +1,84 @@
# Status Patches for geth (go-ethereum)
---
Status-go uses [go-ethereum](https://github.com/ethereum/go-ethereum) (**upstream**) as its dependency. As with any other Go dependency, the `go-ethereum` code is vendored and stored in the `vendor/` folder.
However, a few changes have been made to the upstream that are specific to Status and should not be merged upstream. We keep those changes as a set of patches that can be applied on top of each new release of `go-ethereum`. The patched version of `go-ethereum` is available in the [status-im/go-ethereum](https://github.com/status-im/go-ethereum) repo.
We try to minimize the number and size of changes in those patches as much as possible and, wherever possible, to contribute changes back into the upstream.
# Patches
- `0000-accounts-hd-keys.patch` — adds support for HD extended keys (links/docs?)
- `0002-les-api-status.patch` — adds StatusBackend into LES code (need to be inspected, some things can and should be done outside of les code)
- `0003-dockerfiles-wnode-swarm.patch` — adds Dockerfiles (who uses this?)
- `0004-whisper-notifications.patch` — adds Whisper notifications (need to be reviewed and documented)
- `0005-whisper-delivery.patch` - adds support for logs/traces of Whisper traffic (questionable, nobody used this functionality so far)
- `0006-latest-cht.patch` — updates CHT root hashes; should be updated regularly to keep sync fast, until proper Trusted Checkpoint sync is implemented as part of the LES/2 protocol.
- `0007-README.patch` — update upstream README.md.
# Updating upstream version
When a new stable release of `go-ethereum` comes out, we need to upgrade our fork and vendored copy.
**Note: The process is completely repeatable, so it's safe to remove current `go-ethereum` directory, clone latest upstream version and apply patches from scratch.**
## How to update forked version
Make sure you have `status-go` in your `$GOPATH/src/github.com/status-im/` first.
### From very scratch
Use this method if you want to nuke the forked repo for some reason and apply the patches from scratch:
```
# from scratch
rm -rf $GOPATH/src/github.com/status-im/go-ethereum
cd $GOPATH/src/github.com/status-im/
git clone https://github.com/ethereum/go-ethereum
# update remote url to point to our fork repo
git remote set-url origin git@github.com:status-im/go-ethereum.git
```
### Using existing fork repo (recommended)
```
# merge upstream release branch into local master
git pull git@github.com:ethereum/go-ethereum.git release/1.7:master
```
### Apply patches
```
for patch in $GOPATH/src/github.com/status-im/status-go/geth-patches/*.patch;
do
patch -p1 < $patch;
done
```
Once the patches are applied, you might want to inspect the changes between the currently vendored version and the newly patched version with this command:
```
diff -Nru -x "*_test.go" -x "vendor" -x ".git" -x "tests" -x "build" --brief $GOPATH/src/github.com/status-im/go-ethereum $GOPATH/src/github.com/status-im/status-go/vendor/github.com/ethereum/go-ethereum
```
# Vendor patched version
## Using `dep` tool
TBD
## Manually
This method should be used only while `dep` tool workflow is not set up.
```
# remove existing version from vendor
rm -rf $GOPATH/src/github.com/status-im/status-go/vendor/github.com/ethereum/go-ethereum/
# copy whole directory
cp -a $GOPATH/src/github.com/status-im/go-ethereum $GOPATH/src/github.com/status-im/status-go/vendor/github.com/ethereum/
# remove unneeded folders
cd $GOPATH/src/github.com/status-im/status-go/vendor/github.com/ethereum/go-ethereum
rm -rf .git tests build vendor
# remove _test.go files
find . -type f -name "*_test.go" -exec rm '{}' ';'
```

View File

@ -9,13 +9,11 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
@ -127,28 +125,6 @@ func defaultEmbeddedNodeConfig(config *params.NodeConfig) *node.Config {
return nc return nc
} }
// updateCHT changes trusted canonical hash trie root
func updateCHT(eth *les.LightEthereum, config *params.NodeConfig) {
if !config.BootClusterConfig.Enabled {
return
}
if config.BootClusterConfig.RootNumber == 0 {
return
}
if config.BootClusterConfig.RootHash == "" {
return
}
eth.WriteTrustedCht(light.TrustedCht{
Number: uint64(config.BootClusterConfig.RootNumber),
Root: gethcommon.HexToHash(config.BootClusterConfig.RootHash),
})
log.Info("Added trusted CHT",
"develop", config.DevMode, "number", config.BootClusterConfig.RootNumber, "hash", config.BootClusterConfig.RootHash)
}
// activateEthService configures and registers the eth.Ethereum service with a given node. // activateEthService configures and registers the eth.Ethereum service with a given node.
func activateEthService(stack *node.Node, config *params.NodeConfig) error { func activateEthService(stack *node.Node, config *params.NodeConfig) error {
if !config.LightEthConfig.Enabled { if !config.LightEthConfig.Enabled {
@ -171,12 +147,7 @@ func activateEthService(stack *node.Node, config *params.NodeConfig) error {
ethConf.DatabaseCache = config.LightEthConfig.DatabaseCache ethConf.DatabaseCache = config.LightEthConfig.DatabaseCache
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
lightEth, err := les.New(ctx, &ethConf) return les.New(ctx, &ethConf)
if err == nil {
updateCHT(lightEth, config)
}
return lightEth, err
}); err != nil { }); err != nil {
return fmt.Errorf("%v: %v", ErrLightEthRegistrationFailure, err) return fmt.Errorf("%v: %v", ErrLightEthRegistrationFailure, err)
} }

View File

@ -183,12 +183,6 @@ type BootClusterConfig struct {
// Enabled flag specifies whether feature is enabled // Enabled flag specifies whether feature is enabled
Enabled bool Enabled bool
// RootNumber CHT root number
RootNumber int
// RootHash is hash of CHT root for a given root number
RootHash string
// BootNodes list of bootstrap nodes for a given network (Ropsten, Rinkeby, Homestead), // BootNodes list of bootstrap nodes for a given network (Ropsten, Rinkeby, Homestead),
// for a given mode (production vs development) // for a given mode (production vs development)
BootNodes []string BootNodes []string
@ -588,12 +582,8 @@ func (c *NodeConfig) updateBootClusterConfig() error {
for _, cluster := range clusters { for _, cluster := range clusters {
if cluster.NetworkID == int(c.NetworkID) { if cluster.NetworkID == int(c.NetworkID) {
c.BootClusterConfig.RootNumber = cluster.Prod.Number
c.BootClusterConfig.RootHash = cluster.Prod.Hash
c.BootClusterConfig.BootNodes = cluster.Prod.BootNodes c.BootClusterConfig.BootNodes = cluster.Prod.BootNodes
if c.DevMode { if c.DevMode {
c.BootClusterConfig.RootNumber = cluster.Dev.Number
c.BootClusterConfig.RootHash = cluster.Dev.Hash
c.BootClusterConfig.BootNodes = cluster.Dev.BootNodes c.BootClusterConfig.BootNodes = cluster.Dev.BootNodes
} }
break break

View File

@ -225,8 +225,6 @@ var loadConfigTestCases = []struct {
require.NoError(t, err) require.NoError(t, err)
require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default") require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default")
require.Equal(t, "77eedcf6f940940b3615da49109c1ba57b95c3fff8bcf16f20ac579c3ae24e58", nodeConfig.BootClusterConfig.RootHash)
require.Equal(t, 478, nodeConfig.BootClusterConfig.RootNumber)
enodes := nodeConfig.BootClusterConfig.BootNodes enodes := nodeConfig.BootClusterConfig.BootNodes
expectedEnodes := []string{ expectedEnodes := []string{
@ -262,8 +260,6 @@ var loadConfigTestCases = []struct {
func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) { func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
require.NoError(t, err) require.NoError(t, err)
require.False(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be disabled") require.False(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be disabled")
require.Empty(t, nodeConfig.BootClusterConfig.RootHash)
require.Empty(t, nodeConfig.BootClusterConfig.RootNumber)
}, },
}, },
{ {
@ -276,8 +272,6 @@ var loadConfigTestCases = []struct {
func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) { func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
require.NoError(t, err) require.NoError(t, err)
require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default") require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default")
require.Equal(t, "77eedcf6f940940b3615da49109c1ba57b95c3fff8bcf16f20ac579c3ae24e58", nodeConfig.BootClusterConfig.RootHash)
require.Equal(t, 478, nodeConfig.BootClusterConfig.RootNumber)
enodes := nodeConfig.BootClusterConfig.BootNodes enodes := nodeConfig.BootClusterConfig.BootNodes
expectedEnodes := []string{ expectedEnodes := []string{
@ -310,8 +304,6 @@ var loadConfigTestCases = []struct {
func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) { func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
require.NoError(t, err) require.NoError(t, err)
require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default") require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default")
require.Equal(t, "", nodeConfig.BootClusterConfig.RootHash)
require.Equal(t, 0, nodeConfig.BootClusterConfig.RootNumber)
enodes := nodeConfig.BootClusterConfig.BootNodes enodes := nodeConfig.BootClusterConfig.BootNodes
expectedEnodes := []string{ expectedEnodes := []string{
@ -332,8 +324,6 @@ var loadConfigTestCases = []struct {
func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) { func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
require.NoError(t, err) require.NoError(t, err)
require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default") require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default")
require.Equal(t, "", nodeConfig.BootClusterConfig.RootHash)
require.Equal(t, 0, nodeConfig.BootClusterConfig.RootNumber)
enodes := nodeConfig.BootClusterConfig.BootNodes enodes := nodeConfig.BootClusterConfig.BootNodes
expectedEnodes := []string{ expectedEnodes := []string{
@ -353,8 +343,6 @@ var loadConfigTestCases = []struct {
func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) { func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
require.NoError(t, err) require.NoError(t, err)
require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default") require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default")
require.Equal(t, "85e4286fe0a730390245c49de8476977afdae0eb5530b277f62a52b12313d50f", nodeConfig.BootClusterConfig.RootHash)
require.True(t, nodeConfig.BootClusterConfig.RootNumber >= 805)
enodes := nodeConfig.BootClusterConfig.BootNodes enodes := nodeConfig.BootClusterConfig.BootNodes
expectedEnodes := []string{} expectedEnodes := []string{}
@ -371,8 +359,6 @@ var loadConfigTestCases = []struct {
func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) { func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
require.NoError(t, err) require.NoError(t, err)
require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default") require.True(t, nodeConfig.BootClusterConfig.Enabled, "boot cluster is expected to be enabled by default")
require.Equal(t, "85e4286fe0a730390245c49de8476977afdae0eb5530b277f62a52b12313d50f", nodeConfig.BootClusterConfig.RootHash)
require.True(t, nodeConfig.BootClusterConfig.RootNumber >= 805)
enodes := nodeConfig.BootClusterConfig.BootNodes enodes := nodeConfig.BootClusterConfig.BootNodes
expectedEnodes := []string{} expectedEnodes := []string{}

File diff suppressed because one or more lines are too long

View File

@ -3,8 +3,6 @@
"networkID": 3, "networkID": 3,
"genesisHash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", "genesisHash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"prod": { "prod": {
"number": 478,
"hash": "77eedcf6f940940b3615da49109c1ba57b95c3fff8bcf16f20ac579c3ae24e58",
"bootnodes": [ "bootnodes": [
"enode://7ab298cedc4185a894d21d8a4615262ec6bdce66c9b6783878258e0d5b31013d30c9038932432f70e5b2b6a5cd323bf820554fcb22fbc7b45367889522e9c449@51.15.63.93:30303", "enode://7ab298cedc4185a894d21d8a4615262ec6bdce66c9b6783878258e0d5b31013d30c9038932432f70e5b2b6a5cd323bf820554fcb22fbc7b45367889522e9c449@51.15.63.93:30303",
"enode://f59e8701f18c79c5cbc7618dc7bb928d44dc2f5405c7d693dad97da2d8585975942ec6fd36d3fe608bfdc7270a34a4dd00f38cfe96b2baa24f7cd0ac28d382a1@51.15.79.88:30303", "enode://f59e8701f18c79c5cbc7618dc7bb928d44dc2f5405c7d693dad97da2d8585975942ec6fd36d3fe608bfdc7270a34a4dd00f38cfe96b2baa24f7cd0ac28d382a1@51.15.79.88:30303",
@ -25,8 +23,6 @@
] ]
}, },
"dev": { "dev": {
"number": 478,
"hash": "77eedcf6f940940b3615da49109c1ba57b95c3fff8bcf16f20ac579c3ae24e58",
"bootnodes": [ "bootnodes": [
"enode://7ab298cedc4185a894d21d8a4615262ec6bdce66c9b6783878258e0d5b31013d30c9038932432f70e5b2b6a5cd323bf820554fcb22fbc7b45367889522e9c449@51.15.63.93:30303", "enode://7ab298cedc4185a894d21d8a4615262ec6bdce66c9b6783878258e0d5b31013d30c9038932432f70e5b2b6a5cd323bf820554fcb22fbc7b45367889522e9c449@51.15.63.93:30303",
"enode://f59e8701f18c79c5cbc7618dc7bb928d44dc2f5405c7d693dad97da2d8585975942ec6fd36d3fe608bfdc7270a34a4dd00f38cfe96b2baa24f7cd0ac28d382a1@51.15.79.88:30303", "enode://f59e8701f18c79c5cbc7618dc7bb928d44dc2f5405c7d693dad97da2d8585975942ec6fd36d3fe608bfdc7270a34a4dd00f38cfe96b2baa24f7cd0ac28d382a1@51.15.79.88:30303",
@ -69,13 +65,9 @@
"networkID": 1, "networkID": 1,
"genesisHash": "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", "genesisHash": "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3",
"prod": { "prod": {
"number": 805,
"hash": "85e4286fe0a730390245c49de8476977afdae0eb5530b277f62a52b12313d50f",
"bootnodes": [] "bootnodes": []
}, },
"dev": { "dev": {
"number": 805,
"hash": "85e4286fe0a730390245c49de8476977afdae0eb5530b277f62a52b12313d50f",
"bootnodes": [] "bootnodes": []
} }
} }

View File

@ -1,4 +1,7 @@
**/.git **/.git
.git
!.git/HEAD
!.git/refs/heads
**/*_test.go **/*_test.go
build/_workspace build/_workspace

View File

@ -0,0 +1,9 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.
accounts/usbwallet @karalabe
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi

View File

@ -1,3 +1,9 @@
Hi there,
please note that this is an issue tracker reserved for bug reports and feature requests.
For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com.
#### System information #### System information
Geth version: `geth version` Geth version: `geth version`

View File

@ -33,3 +33,8 @@ profile.cov
# IdeaIDE # IdeaIDE
.idea .idea
# dashboard
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/public/bundle.js

View File

@ -38,7 +38,7 @@ matrix:
- sudo chmod 666 /dev/fuse - sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf - sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install - go run build/ci.go install
- go run build/ci.go test -coverage -misspell - go run build/ci.go test -coverage
- os: osx - os: osx
go: 1.9.x go: 1.9.x
@ -48,7 +48,21 @@ matrix:
- brew install caskroom/cask/brew-cask - brew install caskroom/cask/brew-cask
- brew cask install osxfuse - brew cask install osxfuse
- go run build/ci.go install - go run build/ci.go install
- go run build/ci.go test -coverage -misspell - go run build/ci.go test -coverage
# This builder only tests code linters on latest version of Go
- os: linux
dist: trusty
sudo: required
go: 1.9.x
env:
- lint
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go lint
# This builder does the Ubuntu PPA and Linux Azure uploads # This builder does the Ubuntu PPA and Linux Azure uploads
- os: linux - os: linux
@ -133,16 +147,16 @@ matrix:
- azure-android - azure-android
- maven-android - maven-android
before_install: before_install:
- curl https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz | tar -xz - curl https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH - export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go - export GOROOT=`pwd`/go
- export GOPATH=$HOME/go - export GOPATH=$HOME/go
script: script:
# Build the Android archive and upload it to Maven Central and Azure # Build the Android archive and upload it to Maven Central and Azure
- curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip - curl https://dl.google.com/android/repository/android-ndk-r15c-linux-x86_64.zip -o android-ndk-r15c.zip
- unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip - unzip -q android-ndk-r15c.zip && rm android-ndk-r15c.zip
- mv android-ndk-r14b $HOME - mv android-ndk-r15c $HOME
- export ANDROID_NDK=$HOME/android-ndk-r14b - export ANDROID_NDK=$HOME/android-ndk-r15c
- mkdir -p $GOPATH/src/github.com/ethereum - mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum - ln -s `pwd` $GOPATH/src/github.com/ethereum

View File

@ -12,5 +12,5 @@ FROM alpine:latest
RUN apk add --no-cache ca-certificates RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp EXPOSE 8545 8546 30303 30303/udp 30304/udp
ENTRYPOINT ["geth"] ENTRYPOINT ["geth"]

View File

@ -0,0 +1,15 @@
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers
ADD . /go-ethereum
RUN cd /go-ethereum && make all
# Pull all binaries into a second stage deploy alpine container
FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp 30304/udp

View File

@ -35,7 +35,7 @@ The go-ethereum project comes with several wrappers/executables found in the `cm
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. | | **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. | | `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow insolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). | | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. | | `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. | | `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. |
@ -130,6 +130,8 @@ docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
This will start geth in fast sync mode with a DB memory allowance of 512MB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image. This will start geth in fast sync mode with a DB memory allowance of 512MB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image.
Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not accessible from the outside.
### Programatically interfacing Geth nodes ### Programatically interfacing Geth nodes
As a developer, sooner rather than later you'll want to start interacting with Geth and the Ethereum As a developer, sooner rather than later you'll want to start interacting with Geth and the Ethereum
@ -264,7 +266,7 @@ instance for mining, run it with all your usual flags, extended by:
$ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000 $ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000
``` ```
Which will start mining bocks and transactions on a single CPU thread, crediting all proceedings to Which will start mining blocks and transactions on a single CPU thread, crediting all proceedings to
the account specified by `--etherbase`. You can further tune the mining by changing the default gas the account specified by `--etherbase`. You can further tune the mining by changing the default gas
limit blocks converge to (`--targetgaslimit`) and the price transactions are accepted at (`--gasprice`). limit blocks converge to (`--targetgaslimit`) and the price transactions are accepted at (`--gasprice`).

View File

@ -1 +1 @@
1.7.2 1.7.3

View File

@ -20,10 +20,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"reflect"
"strings"
"github.com/ethereum/go-ethereum/common"
) )
// The ABI holds information about a contract's context and available // The ABI holds information about a contract's context and available
@ -76,106 +72,27 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return append(method.Id(), arguments...), nil return append(method.Id(), arguments...), nil
} }
// these variable are used to determine certain types during type assertion for
// assignment.
var (
r_interSlice = reflect.TypeOf([]interface{}{})
r_hash = reflect.TypeOf(common.Hash{})
r_bytes = reflect.TypeOf([]byte{})
r_byte = reflect.TypeOf(byte(0))
)
// Unpack output in v according to the abi specification // Unpack output in v according to the abi specification
func (abi ABI) Unpack(v interface{}, name string, output []byte) error { func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
var method = abi.Methods[name] if err = bytesAreProper(output); err != nil {
return err
if len(output) == 0 {
return fmt.Errorf("abi: unmarshalling empty output")
} }
// since there can't be naming collisions with contracts and events,
// make sure the passed value is a pointer // we need to decide whether we're calling a method or an event
valueOf := reflect.ValueOf(v) var unpack unpacker
if reflect.Ptr != valueOf.Kind() { if method, ok := abi.Methods[name]; ok {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v) unpack = method
} } else if event, ok := abi.Events[name]; ok {
unpack = event
var (
value = valueOf.Elem()
typ = value.Type()
)
if len(method.Outputs) > 1 {
switch value.Kind() {
// struct will match named return values to the struct's field
// names
case reflect.Struct:
for i := 0; i < len(method.Outputs); i++ {
marshalledValue, err := toGoType(i, method.Outputs[i], output)
if err != nil {
return err
}
reflectValue := reflect.ValueOf(marshalledValue)
for j := 0; j < typ.NumField(); j++ {
field := typ.Field(j)
// TODO read tags: `abi:"fieldName"`
if field.Name == strings.ToUpper(method.Outputs[i].Name[:1])+method.Outputs[i].Name[1:] {
if err := set(value.Field(j), reflectValue, method.Outputs[i]); err != nil {
return err
}
}
}
}
case reflect.Slice:
if !value.Type().AssignableTo(r_interSlice) {
return fmt.Errorf("abi: cannot marshal tuple in to slice %T (only []interface{} is supported)", v)
}
// if the slice already contains values, set those instead of the interface slice itself.
if value.Len() > 0 {
if len(method.Outputs) > value.Len() {
return fmt.Errorf("abi: cannot marshal in to slices of unequal size (require: %v, got: %v)", len(method.Outputs), value.Len())
}
for i := 0; i < len(method.Outputs); i++ {
marshalledValue, err := toGoType(i, method.Outputs[i], output)
if err != nil {
return err
}
reflectValue := reflect.ValueOf(marshalledValue)
if err := set(value.Index(i).Elem(), reflectValue, method.Outputs[i]); err != nil {
return err
}
}
return nil
}
// create a new slice and start appending the unmarshalled
// values to the new interface slice.
z := reflect.MakeSlice(typ, 0, len(method.Outputs))
for i := 0; i < len(method.Outputs); i++ {
marshalledValue, err := toGoType(i, method.Outputs[i], output)
if err != nil {
return err
}
z = reflect.Append(z, reflect.ValueOf(marshalledValue))
}
value.Set(z)
default:
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
}
} else { } else {
marshalledValue, err := toGoType(0, method.Outputs[0], output) return fmt.Errorf("abi: could not locate named method or event.")
if err != nil {
return err
}
if err := set(value, reflect.ValueOf(marshalledValue), method.Outputs[0]); err != nil {
return err
}
} }
return nil // requires a struct to unpack into for a tuple return...
if unpack.isTupleReturn() {
return unpack.tupleUnpack(v, output)
}
return unpack.singleUnpack(v, output)
} }
func (abi *ABI) UnmarshalJSON(data []byte) error { func (abi *ABI) UnmarshalJSON(data []byte) error {

View File

@ -41,6 +41,7 @@ import (
var _ bind.ContractBackend = (*SimulatedBackend)(nil) var _ bind.ContractBackend = (*SimulatedBackend)(nil)
var errBlockNumberUnsupported = errors.New("SimulatedBackend cannot access blocks other than the latest block") var errBlockNumberUnsupported = errors.New("SimulatedBackend cannot access blocks other than the latest block")
var errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in // SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
// the background. Its main purpose is to allow easily testing contract bindings. // the background. Its main purpose is to allow easily testing contract bindings.
@ -59,7 +60,7 @@ type SimulatedBackend struct {
// for testing purposes. // for testing purposes.
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend { func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
database, _ := ethdb.NewMemDatabase() database, _ := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllProtocolChanges, Alloc: alloc} genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
genesis.MustCommit(database) genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), vm.Config{}) blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), vm.Config{})
backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config} backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config}
@ -203,32 +204,46 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
b.mu.Lock() b.mu.Lock()
defer b.mu.Unlock() defer b.mu.Unlock()
// Binary search the gas requirement, as it may be higher than the amount used // Determine the lowest and highest possible gas limits to binary search in between
var ( var (
lo uint64 = params.TxGas - 1 lo uint64 = params.TxGas - 1
hi uint64 hi uint64
cap uint64
) )
if call.Gas != nil && call.Gas.Uint64() >= params.TxGas { if call.Gas != nil && call.Gas.Uint64() >= params.TxGas {
hi = call.Gas.Uint64() hi = call.Gas.Uint64()
} else { } else {
hi = b.pendingBlock.GasLimit().Uint64() hi = b.pendingBlock.GasLimit().Uint64()
} }
for lo+1 < hi { cap = hi
// Take a guess at the gas, and check transaction validity
mid := (hi + lo) / 2 // Create a helper to check if a gas allowance results in an executable transaction
call.Gas = new(big.Int).SetUint64(mid) executable := func(gas uint64) bool {
call.Gas = new(big.Int).SetUint64(gas)
snapshot := b.pendingState.Snapshot() snapshot := b.pendingState.Snapshot()
_, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) _, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
b.pendingState.RevertToSnapshot(snapshot) b.pendingState.RevertToSnapshot(snapshot)
// If the transaction became invalid or execution failed, raise the gas limit
if err != nil || failed { if err != nil || failed {
lo = mid return false
continue }
return true
}
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
mid := (hi + lo) / 2
if !executable(mid) {
lo = mid
} else {
hi = mid
}
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
if !executable(hi) {
return nil, errGasEstimationFailed
} }
// Otherwise assume the transaction succeeded, lower the gas limit
hi = mid
} }
return new(big.Int).SetUint64(hi), nil return new(big.Int).SetUint64(hi), nil
} }

View File

@ -39,22 +39,23 @@ func formatSliceString(kind reflect.Kind, sliceSize int) string {
// type in t. // type in t.
func sliceTypeCheck(t Type, val reflect.Value) error { func sliceTypeCheck(t Type, val reflect.Value) error {
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array { if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
return typeErr(formatSliceString(t.Kind, t.SliceSize), val.Type()) return typeErr(formatSliceString(t.Kind, t.Size), val.Type())
}
if t.IsArray && val.Len() != t.SliceSize {
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), formatSliceString(val.Type().Elem().Kind(), val.Len()))
} }
if t.Elem.IsSlice { if t.T == ArrayTy && val.Len() != t.Size {
return typeErr(formatSliceString(t.Elem.Kind, t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
}
if t.Elem.T == SliceTy {
if val.Len() > 0 { if val.Len() > 0 {
return sliceTypeCheck(*t.Elem, val.Index(0)) return sliceTypeCheck(*t.Elem, val.Index(0))
} }
} else if t.Elem.IsArray { } else if t.Elem.T == ArrayTy {
return sliceTypeCheck(*t.Elem, val.Index(0)) return sliceTypeCheck(*t.Elem, val.Index(0))
} }
if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind { if elemKind := val.Type().Elem().Kind(); elemKind != t.Elem.Kind {
return typeErr(formatSliceString(t.Elem.Kind, t.SliceSize), val.Type()) return typeErr(formatSliceString(t.Elem.Kind, t.Size), val.Type())
} }
return nil return nil
} }
@ -62,20 +63,19 @@ func sliceTypeCheck(t Type, val reflect.Value) error {
// typeCheck checks that the given reflection value can be assigned to the reflection // typeCheck checks that the given reflection value can be assigned to the reflection
// type in t. // type in t.
func typeCheck(t Type, value reflect.Value) error { func typeCheck(t Type, value reflect.Value) error {
if t.IsSlice || t.IsArray { if t.T == SliceTy || t.T == ArrayTy {
return sliceTypeCheck(t, value) return sliceTypeCheck(t, value)
} }
// Check base type validity. Element types will be checked later on. // Check base type validity. Element types will be checked later on.
if t.Kind != value.Kind() { if t.Kind != value.Kind() {
return typeErr(t.Kind, value.Kind()) return typeErr(t.Kind, value.Kind())
} else if t.T == FixedBytesTy && t.Size != value.Len() {
return typeErr(t.Type, value.Type())
} else {
return nil
} }
return nil
}
// varErr returns a formatted error.
func varErr(expected, got reflect.Kind) error {
return typeErr(expected, got)
} }
// typeErr returns a formatted type casting error. // typeErr returns a formatted type casting error.

View File

@ -18,6 +18,7 @@ package abi
import ( import (
"fmt" "fmt"
"reflect"
"strings" "strings"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -44,3 +45,93 @@ func (e Event) Id() common.Hash {
} }
return common.BytesToHash(crypto.Keccak256([]byte(fmt.Sprintf("%v(%v)", e.Name, strings.Join(types, ","))))) return common.BytesToHash(crypto.Keccak256([]byte(fmt.Sprintf("%v(%v)", e.Name, strings.Join(types, ",")))))
} }
// unpacks an event return tuple into a struct of corresponding go types
//
// Unpacking can be done into a struct or a slice/array.
func (e Event) tupleUnpack(v interface{}, output []byte) error {
// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
var (
value = valueOf.Elem()
typ = value.Type()
)
if value.Kind() != reflect.Struct {
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
}
j := 0
for i := 0; i < len(e.Inputs); i++ {
input := e.Inputs[i]
if input.Indexed {
// can't read, continue
continue
} else if input.Type.T == ArrayTy {
// need to move this up because they read sequentially
j += input.Type.Size
}
marshalledValue, err := toGoType((i+j)*32, input.Type, output)
if err != nil {
return err
}
reflectValue := reflect.ValueOf(marshalledValue)
switch value.Kind() {
case reflect.Struct:
for j := 0; j < typ.NumField(); j++ {
field := typ.Field(j)
// TODO read tags: `abi:"fieldName"`
if field.Name == strings.ToUpper(e.Inputs[i].Name[:1])+e.Inputs[i].Name[1:] {
if err := set(value.Field(j), reflectValue, e.Inputs[i]); err != nil {
return err
}
}
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(e.Inputs), value.Len())
}
v := value.Index(i)
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
return fmt.Errorf("abi: cannot unmarshal %v in to %v", v.Type(), reflectValue.Type())
}
reflectValue := reflect.ValueOf(marshalledValue)
if err := set(v.Elem(), reflectValue, e.Inputs[i]); err != nil {
return err
}
default:
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
}
}
return nil
}
func (e Event) isTupleReturn() bool { return len(e.Inputs) > 1 }
func (e Event) singleUnpack(v interface{}, output []byte) error {
// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
if e.Inputs[0].Indexed {
return fmt.Errorf("abi: attempting to unpack indexed variable into element.")
}
value := valueOf.Elem()
marshalledValue, err := toGoType(0, e.Inputs[0].Type, output)
if err != nil {
return err
}
if err := set(value, reflect.ValueOf(marshalledValue), e.Inputs[0]); err != nil {
return err
}
return nil
}

View File

@ -77,6 +77,85 @@ func (method Method) pack(args ...interface{}) ([]byte, error) {
return ret, nil return ret, nil
} }
// unpacks a method return tuple into a struct of corresponding go types
//
// Unpacking can be done into a struct or a slice/array.
func (method Method) tupleUnpack(v interface{}, output []byte) error {
// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
var (
value = valueOf.Elem()
typ = value.Type()
)
j := 0
for i := 0; i < len(method.Outputs); i++ {
toUnpack := method.Outputs[i]
if toUnpack.Type.T == ArrayTy {
// need to move this up because they read sequentially
j += toUnpack.Type.Size
}
marshalledValue, err := toGoType((i+j)*32, toUnpack.Type, output)
if err != nil {
return err
}
reflectValue := reflect.ValueOf(marshalledValue)
switch value.Kind() {
case reflect.Struct:
for j := 0; j < typ.NumField(); j++ {
field := typ.Field(j)
// TODO read tags: `abi:"fieldName"`
if field.Name == strings.ToUpper(method.Outputs[i].Name[:1])+method.Outputs[i].Name[1:] {
if err := set(value.Field(j), reflectValue, method.Outputs[i]); err != nil {
return err
}
}
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(method.Outputs), value.Len())
}
v := value.Index(i)
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
return fmt.Errorf("abi: cannot unmarshal %v in to %v", v.Type(), reflectValue.Type())
}
reflectValue := reflect.ValueOf(marshalledValue)
if err := set(v.Elem(), reflectValue, method.Outputs[i]); err != nil {
return err
}
default:
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
}
}
return nil
}
func (method Method) isTupleReturn() bool { return len(method.Outputs) > 1 }
func (method Method) singleUnpack(v interface{}, output []byte) error {
// make sure the passed value is a pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
value := valueOf.Elem()
marshalledValue, err := toGoType(0, method.Outputs[0].Type, output)
if err != nil {
return err
}
if err := set(value, reflect.ValueOf(marshalledValue), method.Outputs[0]); err != nil {
return err
}
return nil
}
// Sig returns the methods string signature according to the ABI spec. // Sig returns the methods string signature according to the ABI spec.
// //
// Example // Example

View File

@ -25,36 +25,23 @@ import (
) )
var ( var (
big_t = reflect.TypeOf(big.Int{}) big_t = reflect.TypeOf(&big.Int{})
ubig_t = reflect.TypeOf(big.Int{}) derefbig_t = reflect.TypeOf(big.Int{})
byte_t = reflect.TypeOf(byte(0)) uint8_t = reflect.TypeOf(uint8(0))
byte_ts = reflect.TypeOf([]byte(nil)) uint16_t = reflect.TypeOf(uint16(0))
uint_t = reflect.TypeOf(uint(0)) uint32_t = reflect.TypeOf(uint32(0))
uint8_t = reflect.TypeOf(uint8(0)) uint64_t = reflect.TypeOf(uint64(0))
uint16_t = reflect.TypeOf(uint16(0)) int_t = reflect.TypeOf(int(0))
uint32_t = reflect.TypeOf(uint32(0)) int8_t = reflect.TypeOf(int8(0))
uint64_t = reflect.TypeOf(uint64(0)) int16_t = reflect.TypeOf(int16(0))
int_t = reflect.TypeOf(int(0)) int32_t = reflect.TypeOf(int32(0))
int8_t = reflect.TypeOf(int8(0)) int64_t = reflect.TypeOf(int64(0))
int16_t = reflect.TypeOf(int16(0)) address_t = reflect.TypeOf(common.Address{})
int32_t = reflect.TypeOf(int32(0)) int_ts = reflect.TypeOf([]int(nil))
int64_t = reflect.TypeOf(int64(0)) int8_ts = reflect.TypeOf([]int8(nil))
hash_t = reflect.TypeOf(common.Hash{}) int16_ts = reflect.TypeOf([]int16(nil))
address_t = reflect.TypeOf(common.Address{}) int32_ts = reflect.TypeOf([]int32(nil))
int64_ts = reflect.TypeOf([]int64(nil))
uint_ts = reflect.TypeOf([]uint(nil))
uint8_ts = reflect.TypeOf([]uint8(nil))
uint16_ts = reflect.TypeOf([]uint16(nil))
uint32_ts = reflect.TypeOf([]uint32(nil))
uint64_ts = reflect.TypeOf([]uint64(nil))
ubig_ts = reflect.TypeOf([]*big.Int(nil))
int_ts = reflect.TypeOf([]int(nil))
int8_ts = reflect.TypeOf([]int8(nil))
int16_ts = reflect.TypeOf([]int16(nil))
int32_ts = reflect.TypeOf([]int32(nil))
int64_ts = reflect.TypeOf([]int64(nil))
big_ts = reflect.TypeOf([]*big.Int(nil))
) )
// U256 converts a big Int into a 256bit EVM number. // U256 converts a big Int into a 256bit EVM number.

View File

@ -61,8 +61,9 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
reflectValue = mustArrayToByteSlice(reflectValue) reflectValue = mustArrayToByteSlice(reflectValue)
} }
return common.RightPadBytes(reflectValue.Bytes(), 32) return common.RightPadBytes(reflectValue.Bytes(), 32)
default:
panic("abi: fatal error")
} }
panic("abi: fatal error")
} }
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation // packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
@ -74,6 +75,8 @@ func packNum(value reflect.Value) []byte {
return U256(big.NewInt(value.Int())) return U256(big.NewInt(value.Int()))
case reflect.Ptr: case reflect.Ptr:
return U256(value.Interface().(*big.Int)) return U256(value.Interface().(*big.Int))
default:
panic("abi: fatal error")
} }
return nil
} }

View File

@ -24,7 +24,7 @@ import (
// indirect recursively dereferences the value until it either gets the value // indirect recursively dereferences the value until it either gets the value
// or finds a big.Int // or finds a big.Int
func indirect(v reflect.Value) reflect.Value { func indirect(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Ptr && v.Elem().Type() != big_t { if v.Kind() == reflect.Ptr && v.Elem().Type() != derefbig_t {
return indirect(v.Elem()) return indirect(v.Elem())
} }
return v return v
@ -73,15 +73,9 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
func set(dst, src reflect.Value, output Argument) error { func set(dst, src reflect.Value, output Argument) error {
dstType := dst.Type() dstType := dst.Type()
srcType := src.Type() srcType := src.Type()
switch { switch {
case dstType.AssignableTo(src.Type()): case dstType.AssignableTo(srcType):
dst.Set(src) dst.Set(src)
case dstType.Kind() == reflect.Array && srcType.Kind() == reflect.Slice:
if dst.Len() < output.Type.SliceSize {
return fmt.Errorf("abi: cannot unmarshal src (len=%d) in to dst (len=%d)", output.Type.SliceSize, dst.Len())
}
reflect.Copy(dst, src)
case dstType.Kind() == reflect.Interface: case dstType.Kind() == reflect.Interface:
dst.Set(src) dst.Set(src)
case dstType.Kind() == reflect.Ptr: case dstType.Kind() == reflect.Ptr:

View File

@ -21,6 +21,7 @@ import (
"reflect" "reflect"
"regexp" "regexp"
"strconv" "strconv"
"strings"
) )
const ( const (
@ -29,6 +30,7 @@ const (
BoolTy BoolTy
StringTy StringTy
SliceTy SliceTy
ArrayTy
AddressTy AddressTy
FixedBytesTy FixedBytesTy
BytesTy BytesTy
@ -39,9 +41,6 @@ const (
// Type is the reflection of the supported argument type // Type is the reflection of the supported argument type
type Type struct { type Type struct {
IsSlice, IsArray bool
SliceSize int
Elem *Type Elem *Type
Kind reflect.Kind Kind reflect.Kind
@ -53,118 +52,116 @@ type Type struct {
} }
var ( var (
// fullTypeRegex parses the abi types
//
// Types can be in the format of:
//
// Input = Type [ "[" [ Number ] "]" ] Name .
// Type = [ "u" ] "int" [ Number ] [ x ] [ Number ].
//
// Examples:
//
// string int uint fixed
// string32 int8 uint8 uint[]
// address int256 uint256 fixed128x128[2]
fullTypeRegex = regexp.MustCompile(`([a-zA-Z0-9]+)(\[([0-9]*)\])?`)
// typeRegex parses the abi sub types // typeRegex parses the abi sub types
typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?") typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?")
) )
// NewType creates a new reflection type of abi type given in t. // NewType creates a new reflection type of abi type given in t.
func NewType(t string) (typ Type, err error) { func NewType(t string) (typ Type, err error) {
res := fullTypeRegex.FindAllStringSubmatch(t, -1)[0] // check that array brackets are equal if they exist
// check if type is slice and parse type. if strings.Count(t, "[") != strings.Count(t, "]") {
switch { return Type{}, fmt.Errorf("invalid arg type in abi")
case res[3] != "":
// err is ignored. Already checked for number through the regexp
typ.SliceSize, _ = strconv.Atoi(res[3])
typ.IsArray = true
case res[2] != "":
typ.IsSlice, typ.SliceSize = true, -1
case res[0] == "":
return Type{}, fmt.Errorf("abi: type parse error: %s", t)
} }
if typ.IsArray || typ.IsSlice {
sliceType, err := NewType(res[1]) typ.stringKind = t
// if there are brackets, get ready to go into slice/array mode and
// recursively create the type
if strings.Count(t, "[") != 0 {
i := strings.LastIndex(t, "[")
// recursively embed the type
embeddedType, err := NewType(t[:i])
if err != nil { if err != nil {
return Type{}, err return Type{}, err
} }
typ.Elem = &sliceType // grab the last cell and create a type from there
typ.stringKind = sliceType.stringKind + t[len(res[1]):] sliced := t[i:]
// Although we know that this is an array, we cannot return // grab the slice size with regexp
// as we don't know the type of the element, however, if it re := regexp.MustCompile("[0-9]+")
// is still an array, then don't determine the type. intz := re.FindAllString(sliced, -1)
if typ.Elem.IsArray || typ.Elem.IsSlice {
return typ, nil
}
}
// parse the type and size of the abi-type. if len(intz) == 0 {
parsedType := typeRegex.FindAllStringSubmatch(res[1], -1)[0] // is a slice
// varSize is the size of the variable typ.T = SliceTy
var varSize int typ.Kind = reflect.Slice
if len(parsedType[3]) > 0 { typ.Elem = &embeddedType
var err error typ.Type = reflect.SliceOf(embeddedType.Type)
varSize, err = strconv.Atoi(parsedType[2]) } else if len(intz) == 1 {
if err != nil { // is a array
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err) typ.T = ArrayTy
} typ.Kind = reflect.Array
} typ.Elem = &embeddedType
// varType is the parsed abi type typ.Size, err = strconv.Atoi(intz[0])
varType := parsedType[1] if err != nil {
// substitute canonical integer return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
if varSize == 0 && (varType == "int" || varType == "uint") { }
varSize = 256 typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
t += "256"
}
// only set stringKind if not array or slice, as for those,
// the correct string type has been set
if !(typ.IsArray || typ.IsSlice) {
typ.stringKind = t
}
switch varType {
case "int":
typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
typ.Size = varSize
typ.T = IntTy
case "uint":
typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
typ.Size = varSize
typ.T = UintTy
case "bool":
typ.Kind = reflect.Bool
typ.T = BoolTy
case "address":
typ.Kind = reflect.Array
typ.Type = address_t
typ.Size = 20
typ.T = AddressTy
case "string":
typ.Kind = reflect.String
typ.Size = -1
typ.T = StringTy
case "bytes":
sliceType, _ := NewType("uint8")
typ.Elem = &sliceType
if varSize == 0 {
typ.IsSlice = true
typ.T = BytesTy
typ.SliceSize = -1
} else { } else {
typ.IsArray = true return Type{}, fmt.Errorf("invalid formatting of array type")
typ.T = FixedBytesTy }
typ.SliceSize = varSize return typ, err
} else {
// parse the type and size of the abi-type.
parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
// varSize is the size of the variable
var varSize int
if len(parsedType[3]) > 0 {
var err error
varSize, err = strconv.Atoi(parsedType[2])
if err != nil {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
} else {
if parsedType[0] == "uint" || parsedType[0] == "int" {
// this should fail because it means that there's something wrong with
// the abi type (the compiler should always format it to the size...always)
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
}
// varType is the parsed abi type
varType := parsedType[1]
switch varType {
case "int":
typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
typ.Size = varSize
typ.T = IntTy
case "uint":
typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
typ.Size = varSize
typ.T = UintTy
case "bool":
typ.Kind = reflect.Bool
typ.T = BoolTy
typ.Type = reflect.TypeOf(bool(false))
case "address":
typ.Kind = reflect.Array
typ.Type = address_t
typ.Size = 20
typ.T = AddressTy
case "string":
typ.Kind = reflect.String
typ.Type = reflect.TypeOf("")
typ.T = StringTy
case "bytes":
if varSize == 0 {
typ.T = BytesTy
typ.Kind = reflect.Slice
typ.Type = reflect.SliceOf(reflect.TypeOf(byte(0)))
} else {
typ.T = FixedBytesTy
typ.Kind = reflect.Array
typ.Size = varSize
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
}
case "function":
typ.Kind = reflect.Array
typ.T = FunctionTy
typ.Size = 24
typ.Type = reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
default:
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
} }
case "function":
sliceType, _ := NewType("uint8")
typ.Elem = &sliceType
typ.IsArray = true
typ.T = FunctionTy
typ.SliceSize = 24
default:
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
} }
return return
@ -183,7 +180,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
return nil, err return nil, err
} }
if (t.IsSlice || t.IsArray) && t.T != BytesTy && t.T != FixedBytesTy && t.T != FunctionTy { if t.T == SliceTy || t.T == ArrayTy {
var packed []byte var packed []byte
for i := 0; i < v.Len(); i++ { for i := 0; i < v.Len(); i++ {
@ -193,18 +190,17 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
} }
packed = append(packed, val...) packed = append(packed, val...)
} }
if t.IsSlice { if t.T == SliceTy {
return packBytesSlice(packed, v.Len()), nil return packBytesSlice(packed, v.Len()), nil
} else if t.IsArray { } else if t.T == ArrayTy {
return packed, nil return packed, nil
} }
} }
return packElement(t, v), nil return packElement(t, v), nil
} }
// requireLengthPrefix returns whether the type requires any sort of length // requireLengthPrefix returns whether the type requires any sort of length
// prefixing. // prefixing.
func (t Type) requiresLengthPrefix() bool { func (t Type) requiresLengthPrefix() bool {
return t.T != FixedBytesTy && (t.T == StringTy || t.T == BytesTy || t.IsSlice) return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
} }

View File

@ -25,122 +25,20 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
// toGoSliceType parses the input and casts it to the proper slice defined by the ABI // unpacker is a utility interface that enables us to have
// argument in T. // abstraction between events and methods and also to properly
func toGoSlice(i int, t Argument, output []byte) (interface{}, error) { // "unpack" them; e.g. events use Inputs, methods use Outputs.
index := i * 32 type unpacker interface {
// The slice must, at very least be large enough for the index+32 which is exactly the size required tupleUnpack(v interface{}, output []byte) error
// for the [offset in output, size of offset]. singleUnpack(v interface{}, output []byte) error
if index+32 > len(output) { isTupleReturn() bool
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), index+32)
}
elem := t.Type.Elem
// first we need to create a slice of the type
var refSlice reflect.Value
switch elem.T {
case IntTy, UintTy, BoolTy:
// create a new reference slice matching the element type
switch t.Type.Kind {
case reflect.Bool:
refSlice = reflect.ValueOf([]bool(nil))
case reflect.Uint8:
refSlice = reflect.ValueOf([]uint8(nil))
case reflect.Uint16:
refSlice = reflect.ValueOf([]uint16(nil))
case reflect.Uint32:
refSlice = reflect.ValueOf([]uint32(nil))
case reflect.Uint64:
refSlice = reflect.ValueOf([]uint64(nil))
case reflect.Int8:
refSlice = reflect.ValueOf([]int8(nil))
case reflect.Int16:
refSlice = reflect.ValueOf([]int16(nil))
case reflect.Int32:
refSlice = reflect.ValueOf([]int32(nil))
case reflect.Int64:
refSlice = reflect.ValueOf([]int64(nil))
default:
refSlice = reflect.ValueOf([]*big.Int(nil))
}
case AddressTy: // address must be of slice Address
refSlice = reflect.ValueOf([]common.Address(nil))
case HashTy: // hash must be of slice hash
refSlice = reflect.ValueOf([]common.Hash(nil))
case FixedBytesTy:
refSlice = reflect.ValueOf([][]byte(nil))
default: // no other types are supported
return nil, fmt.Errorf("abi: unsupported slice type %v", elem.T)
}
var slice []byte
var size int
var offset int
if t.Type.IsSlice {
// get the offset which determines the start of this array ...
offset = int(binary.BigEndian.Uint64(output[index+24 : index+32]))
if offset+32 > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
}
slice = output[offset:]
// ... starting with the size of the array in elements ...
size = int(binary.BigEndian.Uint64(slice[24:32]))
slice = slice[32:]
// ... and make sure that we've at the very least the amount of bytes
// available in the buffer.
if size*32 > len(slice) {
return nil, fmt.Errorf("abi: cannot marshal in to go slice: insufficient size output %d require %d", len(output), offset+32+size*32)
}
// reslice to match the required size
slice = slice[:size*32]
} else if t.Type.IsArray {
//get the number of elements in the array
size = t.Type.SliceSize
//check to make sure array size matches up
if index+32*size > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), index+32*size)
}
//slice is there for a fixed amount of times
slice = output[index : index+size*32]
}
for i := 0; i < size; i++ {
var (
inter interface{} // interface type
returnOutput = slice[i*32 : i*32+32] // the return output
err error
)
// set inter to the correct type (cast)
switch elem.T {
case IntTy, UintTy:
inter = readInteger(t.Type.Kind, returnOutput)
case BoolTy:
inter, err = readBool(returnOutput)
if err != nil {
return nil, err
}
case AddressTy:
inter = common.BytesToAddress(returnOutput)
case HashTy:
inter = common.BytesToHash(returnOutput)
case FixedBytesTy:
inter = returnOutput
}
// append the item to our reflect slice
refSlice = reflect.Append(refSlice, reflect.ValueOf(inter))
}
// return the interface
return refSlice.Interface(), nil
} }
// reads the integer based on its kind
func readInteger(kind reflect.Kind, b []byte) interface{} { func readInteger(kind reflect.Kind, b []byte) interface{} {
switch kind { switch kind {
case reflect.Uint8: case reflect.Uint8:
return uint8(b[len(b)-1]) return b[len(b)-1]
case reflect.Uint16: case reflect.Uint16:
return binary.BigEndian.Uint16(b[len(b)-2:]) return binary.BigEndian.Uint16(b[len(b)-2:])
case reflect.Uint32: case reflect.Uint32:
@ -160,13 +58,10 @@ func readInteger(kind reflect.Kind, b []byte) interface{} {
} }
} }
// reads a bool
func readBool(word []byte) (bool, error) { func readBool(word []byte) (bool, error) {
if len(word) != 32 { for _, b := range word[:31] {
return false, fmt.Errorf("abi: fatal error: incorrect word length") if b != 0 {
}
for i, b := range word {
if b != 0 && i != 31 {
return false, errBadBool return false, errBadBool
} }
} }
@ -178,58 +73,144 @@ func readBool(word []byte) (bool, error) {
default: default:
return false, errBadBool return false, errBadBool
} }
}
// A function type is simply the address with the function selection signature at the end.
// This enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes)
func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
if t.T != FunctionTy {
return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array.")
}
if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 {
err = fmt.Errorf("abi: got improperly encoded function type, got %v", word)
} else {
copy(funcTy[:], word[0:24])
}
return
}
// through reflection, creates a fixed array to be read from
func readFixedBytes(t Type, word []byte) (interface{}, error) {
if t.T != FixedBytesTy {
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array.")
}
// convert
array := reflect.New(t.Type).Elem()
reflect.Copy(array, reflect.ValueOf(word[0:t.Size]))
return array.Interface(), nil
} }
// toGoType parses the input and casts it to the proper type defined by the ABI // iteratively unpack elements
// argument in T. func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
func toGoType(i int, t Argument, output []byte) (interface{}, error) { if start+32*size > len(output) {
// we need to treat slices differently return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
if (t.Type.IsSlice || t.Type.IsArray) && t.Type.T != BytesTy && t.Type.T != StringTy && t.Type.T != FixedBytesTy && t.Type.T != FunctionTy {
return toGoSlice(i, t, output)
} }
index := i * 32 // this value will become our slice or our array, depending on the type
var refSlice reflect.Value
slice := output[start : start+size*32]
if t.T == SliceTy {
// declare our slice
refSlice = reflect.MakeSlice(t.Type, size, size)
} else if t.T == ArrayTy {
// declare our array
refSlice = reflect.New(t.Type).Elem()
} else {
return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
}
for i, j := start, 0; j*32 < len(slice); i, j = i+32, j+1 {
// this corrects the arrangement so that we get all the underlying array values
if t.Elem.T == ArrayTy && j != 0 {
i = start + t.Elem.Size*32*j
}
inter, err := toGoType(i, *t.Elem, output)
if err != nil {
return nil, err
}
// append the item to our reflect slice
refSlice.Index(j).Set(reflect.ValueOf(inter))
}
// return the interface
return refSlice.Interface(), nil
}
// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a go type with accordance with the ABI spec.
func toGoType(index int, t Type, output []byte) (interface{}, error) {
if index+32 > len(output) { if index+32 > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32) return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
} }
// Parse the given index output and check whether we need to read var (
// a different offset and length based on the type (i.e. string, bytes) returnOutput []byte
var returnOutput []byte begin, end int
switch t.Type.T { err error
case StringTy, BytesTy: // variable arrays are written at the end of the return bytes )
// parse offset from which we should start reading
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
if offset+32 > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32)
}
// parse the size up until we should be reading
size := int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
if offset+32+size > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+size)
}
// get the bytes for this return value // if we require a length prefix, find the beginning word and size returned.
returnOutput = output[offset+32 : offset+32+size] if t.requiresLengthPrefix() {
default: begin, end, err = lengthPrefixPointsTo(index, output)
if err != nil {
return nil, err
}
} else {
returnOutput = output[index : index+32] returnOutput = output[index : index+32]
} }
// convert the bytes to whatever is specified by the ABI. switch t.T {
switch t.Type.T { case SliceTy:
return forEachUnpack(t, output, begin, end)
case ArrayTy:
return forEachUnpack(t, output, index, t.Size)
case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+end]), nil
case IntTy, UintTy: case IntTy, UintTy:
return readInteger(t.Type.Kind, returnOutput), nil return readInteger(t.Kind, returnOutput), nil
case BoolTy: case BoolTy:
return readBool(returnOutput) return readBool(returnOutput)
case AddressTy: case AddressTy:
return common.BytesToAddress(returnOutput), nil return common.BytesToAddress(returnOutput), nil
case HashTy: case HashTy:
return common.BytesToHash(returnOutput), nil return common.BytesToHash(returnOutput), nil
case BytesTy, FixedBytesTy, FunctionTy: case BytesTy:
return returnOutput, nil return output[begin : begin+end], nil
case StringTy: case FixedBytesTy:
return string(returnOutput), nil return readFixedBytes(t, returnOutput)
case FunctionTy:
return readFunctionType(t, returnOutput)
default:
return nil, fmt.Errorf("abi: unknown type %v", t.T)
}
}
// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type.
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
if offset+32 > len(output) {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
}
length = int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
if offset+32+length > len(output) {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+length)
}
start = offset + 32
//fmt.Printf("LENGTH PREFIX INFO: \nsize: %v\noffset: %v\nstart: %v\n", length, offset, start)
return
}
// checks for proper formatting of byte output
func bytesAreProper(output []byte) error {
if len(output) == 0 {
return fmt.Errorf("abi: unmarshalling empty output")
} else if len(output)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output")
} else {
return nil
} }
return nil, fmt.Errorf("abi: unknown type %v", t.Type.T)
} }

View File

@ -20,7 +20,6 @@ import (
"bufio" "bufio"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
@ -75,13 +74,6 @@ type accountCache struct {
fileC fileCache fileC fileCache
} }
// fileCache is a cache of files seen during scan of keystore
type fileCache struct {
all *set.SetNonTS // list of all files
mtime time.Time // latest mtime seen
mu sync.RWMutex
}
func newAccountCache(keydir string) (*accountCache, chan struct{}) { func newAccountCache(keydir string) (*accountCache, chan struct{}) {
ac := &accountCache{ ac := &accountCache{
keydir: keydir, keydir: keydir,
@ -236,66 +228,22 @@ func (ac *accountCache) close() {
ac.mu.Unlock() ac.mu.Unlock()
} }
// scanFiles performs a new scan on the given directory, compares against the already
// cached filenames, and returns file sets: new, missing , modified
func (fc *fileCache) scanFiles(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
t0 := time.Now()
files, err := ioutil.ReadDir(keyDir)
t1 := time.Now()
if err != nil {
return nil, nil, nil, err
}
fc.mu.RLock()
prevMtime := fc.mtime
fc.mu.RUnlock()
filesNow := set.NewNonTS()
moddedFiles := set.NewNonTS()
var newMtime time.Time
for _, fi := range files {
modTime := fi.ModTime()
path := filepath.Join(keyDir, fi.Name())
if skipKeyFile(fi) {
log.Trace("Ignoring file on account scan", "path", path)
continue
}
filesNow.Add(path)
if modTime.After(prevMtime) {
moddedFiles.Add(path)
}
if modTime.After(newMtime) {
newMtime = modTime
}
}
t2 := time.Now()
fc.mu.Lock()
// Missing = previous - current
missing := set.Difference(fc.all, filesNow)
// New = current - previous
newFiles := set.Difference(filesNow, fc.all)
// Modified = modified - new
modified := set.Difference(moddedFiles, newFiles)
fc.all = filesNow
fc.mtime = newMtime
fc.mu.Unlock()
t3 := time.Now()
log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
return newFiles, missing, modified, nil
}
// scanAccounts checks if any changes have occurred on the filesystem, and // scanAccounts checks if any changes have occurred on the filesystem, and
// updates the account cache accordingly // updates the account cache accordingly
func (ac *accountCache) scanAccounts() error { func (ac *accountCache) scanAccounts() error {
newFiles, missingFiles, modified, err := ac.fileC.scanFiles(ac.keydir) // Scan the entire folder metadata for file changes
t1 := time.Now() creates, deletes, updates, err := ac.fileC.scan(ac.keydir)
if err != nil { if err != nil {
log.Debug("Failed to reload keystore contents", "err", err) log.Debug("Failed to reload keystore contents", "err", err)
return err return err
} }
if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 {
return nil
}
// Create a helper method to scan the contents of the key files
var ( var (
buf = new(bufio.Reader) buf = new(bufio.Reader)
keyJSON struct { key struct {
Address string `json:"address"` Address string `json:"address"`
} }
) )
@ -308,9 +256,9 @@ func (ac *accountCache) scanAccounts() error {
defer fd.Close() defer fd.Close()
buf.Reset(fd) buf.Reset(fd)
// Parse the address. // Parse the address.
keyJSON.Address = "" key.Address = ""
err = json.NewDecoder(buf).Decode(&keyJSON) err = json.NewDecoder(buf).Decode(&key)
addr := common.HexToAddress(keyJSON.Address) addr := common.HexToAddress(key.Address)
switch { switch {
case err != nil: case err != nil:
log.Debug("Failed to decode keystore key", "path", path, "err", err) log.Debug("Failed to decode keystore key", "path", path, "err", err)
@ -321,47 +269,30 @@ func (ac *accountCache) scanAccounts() error {
} }
return nil return nil
} }
// Process all the file diffs
start := time.Now()
for _, p := range newFiles.List() { for _, p := range creates.List() {
path, _ := p.(string) if a := readAccount(p.(string)); a != nil {
a := readAccount(path)
if a != nil {
ac.add(*a) ac.add(*a)
} }
} }
for _, p := range missingFiles.List() { for _, p := range deletes.List() {
path, _ := p.(string) ac.deleteByFile(p.(string))
ac.deleteByFile(path)
} }
for _, p := range updates.List() {
for _, p := range modified.List() { path := p.(string)
path, _ := p.(string)
a := readAccount(path)
ac.deleteByFile(path) ac.deleteByFile(path)
if a != nil { if a := readAccount(path); a != nil {
ac.add(*a) ac.add(*a)
} }
} }
end := time.Now()
t2 := time.Now()
select { select {
case ac.notify <- struct{}{}: case ac.notify <- struct{}{}:
default: default:
} }
log.Trace("Handled keystore changes", "time", t2.Sub(t1)) log.Trace("Handled keystore changes", "time", end.Sub(start))
return nil return nil
} }
func skipKeyFile(fi os.FileInfo) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true
}
// Skip misc special files, directories (yes, symlinks too).
if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
return true
}
return false
}

View File

@ -0,0 +1,102 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
set "gopkg.in/fatih/set.v0"
)
// fileCache is a cache of files seen during scan of keystore.
type fileCache struct {
all *set.SetNonTS // Set of all files from the keystore folder
lastMod time.Time // Last time instance when a file was modified
mu sync.RWMutex
}
// scan performs a new scan on the given directory, compares against the already
// cached filenames, and returns file sets: creates, deletes, updates.
func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
t0 := time.Now()
// List all the failes from the keystore folder
files, err := ioutil.ReadDir(keyDir)
if err != nil {
return nil, nil, nil, err
}
t1 := time.Now()
fc.mu.Lock()
defer fc.mu.Unlock()
// Iterate all the files and gather their metadata
all := set.NewNonTS()
mods := set.NewNonTS()
var newLastMod time.Time
for _, fi := range files {
// Skip any non-key files from the folder
path := filepath.Join(keyDir, fi.Name())
if skipKeyFile(fi) {
log.Trace("Ignoring file on account scan", "path", path)
continue
}
// Gather the set of all and fresly modified files
all.Add(path)
modified := fi.ModTime()
if modified.After(fc.lastMod) {
mods.Add(path)
}
if modified.After(newLastMod) {
newLastMod = modified
}
}
t2 := time.Now()
// Update the tracked files and return the three sets
deletes := set.Difference(fc.all, all) // Deletes = previous - current
creates := set.Difference(all, fc.all) // Creates = current - previous
updates := set.Difference(mods, creates) // Updates = modified - creates
fc.all, fc.lastMod = all, newLastMod
t3 := time.Now()
// Report on the scanning stats and return
log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
return creates, deletes, updates, nil
}
// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
func skipKeyFile(fi os.FileInfo) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true
}
// Skip misc special files, directories (yes, symlinks too).
if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
return true
}
return false
}

View File

@ -28,6 +28,7 @@ package keystore
import ( import (
"bytes" "bytes"
"crypto/aes" "crypto/aes"
crand "crypto/rand"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
@ -91,6 +92,12 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
return key, nil return key, nil
} }
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, crand.Reader, auth)
return a.Address, err
}
func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error { func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error {
keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP) keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP)
if err != nil { if err != nil {

View File

@ -81,10 +81,14 @@ func (w *watcher) loop() {
// When an event occurs, the reload call is delayed a bit so that // When an event occurs, the reload call is delayed a bit so that
// multiple events arriving quickly only cause a single reload. // multiple events arriving quickly only cause a single reload.
var ( var (
debounce = time.NewTimer(0)
debounceDuration = 500 * time.Millisecond debounceDuration = 500 * time.Millisecond
rescanTriggered = false rescanTriggered = false
debounce = time.NewTimer(0)
) )
// Ignore initial trigger
if !debounce.Stop() {
<-debounce.C
}
defer debounce.Stop() defer debounce.Stop()
for { for {
select { select {

View File

@ -41,6 +41,11 @@ type Manager struct {
// NewManager creates a generic account manager to sign transaction via various // NewManager creates a generic account manager to sign transaction via various
// supported backends. // supported backends.
func NewManager(backends ...Backend) *Manager { func NewManager(backends ...Backend) *Manager {
// Retrieve the initial list of wallets from the backends and sort by URL
var wallets []Wallet
for _, backend := range backends {
wallets = merge(wallets, backend.Wallets()...)
}
// Subscribe to wallet notifications from all backends // Subscribe to wallet notifications from all backends
updates := make(chan WalletEvent, 4*len(backends)) updates := make(chan WalletEvent, 4*len(backends))
@ -48,11 +53,6 @@ func NewManager(backends ...Backend) *Manager {
for i, backend := range backends { for i, backend := range backends {
subs[i] = backend.Subscribe(updates) subs[i] = backend.Subscribe(updates)
} }
// Retrieve the initial list of wallets from the backends and sort by URL
var wallets []Wallet
for _, backend := range backends {
wallets = merge(wallets, backend.Wallets()...)
}
// Assemble the account manager and return // Assemble the account manager and return
am := &Manager{ am := &Manager{
backends: make(map[reflect.Type][]Backend), backends: make(map[reflect.Type][]Backend),

View File

@ -94,7 +94,8 @@ func stateTestCmd(ctx *cli.Context) error {
for _, st := range test.Subtests() { for _, st := range test.Subtests() {
// Run the test and aggregate the result // Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
if state, err := test.Run(st, cfg); err != nil { state, err := test.Run(st, cfg)
if err != nil {
// Test failed, mark as so and dump any state to aid debugging // Test failed, mark as so and dump any state to aid debugging
result.Pass, result.Error = false, err.Error() result.Pass, result.Error = false, err.Error()
if ctx.GlobalBool(DumpFlag.Name) && state != nil { if ctx.GlobalBool(DumpFlag.Name) && state != nil {
@ -102,6 +103,11 @@ func stateTestCmd(ctx *cli.Context) error {
result.State = &dump result.State = &dump
} }
} }
// print state root for evmlab tracing (already committed above, so no need to delete objects again
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
}
results = append(results, *result) results = append(results, *result)
// Print any structured logs collected // Print any structured logs collected

View File

@ -21,8 +21,10 @@ package main
import ( import (
"bytes" "bytes"
"compress/zlib"
"context" "context"
"encoding/json" "encoding/json"
"errors"
"flag" "flag"
"fmt" "fmt"
"html/template" "html/template"
@ -33,6 +35,7 @@ import (
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
"regexp"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@ -181,10 +184,10 @@ func main() {
// request represents an accepted funding request. // request represents an accepted funding request.
type request struct { type request struct {
Username string `json:"username"` // GitHub user for displaying an avatar Avatar string `json:"avatar"` // Avatar URL to make the UI nicer
Account common.Address `json:"account"` // Ethereum address being funded Account common.Address `json:"account"` // Ethereum address being funded
Time time.Time `json:"time"` // Timestamp when te request was accepted Time time.Time `json:"time"` // Timestamp when the request was accepted
Tx *types.Transaction `json:"tx"` // Transaction funding the account Tx *types.Transaction `json:"tx"` // Transaction funding the account
} }
// faucet represents a crypto faucet backed by an Ethereum light client. // faucet represents a crypto faucet backed by an Ethereum light client.
@ -299,6 +302,8 @@ func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
// apiHandler handles requests for Ether grants and transaction statuses. // apiHandler handles requests for Ether grants and transaction statuses.
func (f *faucet) apiHandler(conn *websocket.Conn) { func (f *faucet) apiHandler(conn *websocket.Conn) {
// Start tracking the connection and drop at the end // Start tracking the connection and drop at the end
defer conn.Close()
f.lock.Lock() f.lock.Lock()
f.conns = append(f.conns, conn) f.conns = append(f.conns, conn)
f.lock.Unlock() f.lock.Unlock()
@ -313,25 +318,50 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
} }
f.lock.Unlock() f.lock.Unlock()
}() }()
// Send a few initial stats to the client // Gather the initial stats from the network to report
balance, _ := f.client.BalanceAt(context.Background(), f.account.Address, nil) var (
nonce, _ := f.client.NonceAt(context.Background(), f.account.Address, nil) head *types.Header
balance *big.Int
nonce uint64
err error
)
for {
// Attempt to retrieve the stats, may error on no faucet connectivity
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
head, err = f.client.HeaderByNumber(ctx, nil)
if err == nil {
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
if err == nil {
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
}
}
cancel()
websocket.JSON.Send(conn, map[string]interface{}{ // If stats retrieval failed, wait a bit and retry
if err != nil {
if err = sendError(conn, errors.New("Faucet offline: "+err.Error())); err != nil {
log.Warn("Failed to send faucet error to client", "err", err)
return
}
time.Sleep(3 * time.Second)
continue
}
// Initial stats reported successfully, proceed with user interaction
break
}
// Send over the initial stats and the latest header
if err = send(conn, map[string]interface{}{
"funds": balance.Div(balance, ether), "funds": balance.Div(balance, ether),
"funded": nonce, "funded": nonce,
"peers": f.stack.Server().PeerCount(), "peers": f.stack.Server().PeerCount(),
"requests": f.reqs, "requests": f.reqs,
}) }, 3*time.Second); err != nil {
// Send the initial block to the client log.Warn("Failed to send initial stats to client", "err", err)
ctx, cancel := context.WithTimeout(context.Background(), time.Second) return
header, err := f.client.HeaderByNumber(ctx, nil) }
cancel() if err = send(conn, head, 3*time.Second); err != nil {
log.Warn("Failed to send initial header to client", "err", err)
if err != nil { return
log.Error("Failed to retrieve latest header", "err", err)
} else {
websocket.JSON.Send(conn, header)
} }
// Keep reading requests from the websocket until the connection breaks // Keep reading requests from the websocket until the connection breaks
for { for {
@ -341,18 +371,25 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
Tier uint `json:"tier"` Tier uint `json:"tier"`
Captcha string `json:"captcha"` Captcha string `json:"captcha"`
} }
if err := websocket.JSON.Receive(conn, &msg); err != nil { if err = websocket.JSON.Receive(conn, &msg); err != nil {
return return
} }
if !strings.HasPrefix(msg.URL, "https://gist.github.com/") { if !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") &&
websocket.JSON.Send(conn, map[string]string{"error": "URL doesn't link to GitHub Gists"}) !strings.HasPrefix(msg.URL, "https://plus.google.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
if err = sendError(conn, errors.New("URL doesn't link to supported services")); err != nil {
log.Warn("Failed to send URL error to client", "err", err)
return
}
continue continue
} }
if msg.Tier >= uint(*tiersFlag) { if msg.Tier >= uint(*tiersFlag) {
websocket.JSON.Send(conn, map[string]string{"error": "Invalid funding tier requested"}) if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil {
log.Warn("Failed to send tier error to client", "err", err)
return
}
continue continue
} }
log.Info("Faucet funds requested", "gist", msg.URL, "tier", msg.Tier) log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)
// If captcha verifications are enabled, make sure we're not dealing with a robot // If captcha verifications are enabled, make sure we're not dealing with a robot
if *captchaToken != "" { if *captchaToken != "" {
@ -362,7 +399,10 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form) res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
if err != nil { if err != nil {
websocket.JSON.Send(conn, map[string]string{"error": err.Error()}) if err = sendError(conn, err); err != nil {
log.Warn("Failed to send captcha post error to client", "err", err)
return
}
continue continue
} }
var result struct { var result struct {
@ -372,74 +412,55 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
err = json.NewDecoder(res.Body).Decode(&result) err = json.NewDecoder(res.Body).Decode(&result)
res.Body.Close() res.Body.Close()
if err != nil { if err != nil {
websocket.JSON.Send(conn, map[string]string{"error": err.Error()}) if err = sendError(conn, err); err != nil {
log.Warn("Failed to send captcha decode error to client", "err", err)
return
}
continue continue
} }
if !result.Success { if !result.Success {
log.Warn("Captcha verification failed", "err", string(result.Errors)) log.Warn("Captcha verification failed", "err", string(result.Errors))
websocket.JSON.Send(conn, map[string]string{"error": "Beep-bop, you're a robot!"}) if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil {
log.Warn("Failed to send captcha failure to client", "err", err)
return
}
continue continue
} }
} }
// Retrieve the gist from the GitHub Gist APIs // Retrieve the Ethereum address to fund, the requesting user and a profile picture
parts := strings.Split(msg.URL, "/") var (
req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil) username string
if *githubUser != "" { avatar string
req.SetBasicAuth(*githubUser, *githubToken) address common.Address
)
switch {
case strings.HasPrefix(msg.URL, "https://gist.github.com/"):
username, avatar, address, err = authGitHub(msg.URL)
case strings.HasPrefix(msg.URL, "https://twitter.com/"):
username, avatar, address, err = authTwitter(msg.URL)
case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
username, avatar, address, err = authGooglePlus(msg.URL)
case strings.HasPrefix(msg.URL, "https://www.facebook.com/"):
username, avatar, address, err = authFacebook(msg.URL)
default:
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
} }
res, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
websocket.JSON.Send(conn, map[string]string{"error": err.Error()}) if err = sendError(conn, err); err != nil {
continue log.Warn("Failed to send prefix error to client", "err", err)
} return
var gist struct {
Owner struct {
Login string `json:"login"`
} `json:"owner"`
Files map[string]struct {
Content string `json:"content"`
} `json:"files"`
}
err = json.NewDecoder(res.Body).Decode(&gist)
res.Body.Close()
if err != nil {
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
continue
}
if gist.Owner.Login == "" {
websocket.JSON.Send(conn, map[string]string{"error": "Anonymous Gists not allowed"})
continue
}
// Iterate over all the files and look for Ethereum addresses
var address common.Address
for _, file := range gist.Files {
content := strings.TrimSpace(file.Content)
if len(content) == 2+common.AddressLength*2 {
address = common.HexToAddress(content)
} }
}
if address == (common.Address{}) {
websocket.JSON.Send(conn, map[string]string{"error": "No Ethereum address found to fund"})
continue continue
} }
// Validate the user's existence since the API is unhelpful here log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)
if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
continue
}
res.Body.Close()
if res.StatusCode != 200 {
websocket.JSON.Send(conn, map[string]string{"error": "Invalid user... boom!"})
continue
}
// Ensure the user didn't request funds too recently // Ensure the user didn't request funds too recently
f.lock.Lock() f.lock.Lock()
var ( var (
fund bool fund bool
timeout time.Time timeout time.Time
) )
if timeout = f.timeouts[gist.Owner.Login]; time.Now().After(timeout) { if timeout = f.timeouts[username]; time.Now().After(timeout) {
// User wasn't funded recently, create the funding transaction // User wasn't funded recently, create the funding transaction
amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether) amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil)) amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
@ -448,33 +469,45 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, big.NewInt(21000), f.price, nil) tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, big.NewInt(21000), f.price, nil)
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId) signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
if err != nil { if err != nil {
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
f.lock.Unlock() f.lock.Unlock()
if err = sendError(conn, err); err != nil {
log.Warn("Failed to send transaction creation error to client", "err", err)
return
}
continue continue
} }
// Submit the transaction and mark as funded if successful // Submit the transaction and mark as funded if successful
if err := f.client.SendTransaction(context.Background(), signed); err != nil { if err := f.client.SendTransaction(context.Background(), signed); err != nil {
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
f.lock.Unlock() f.lock.Unlock()
if err = sendError(conn, err); err != nil {
log.Warn("Failed to send transaction transmission error to client", "err", err)
return
}
continue continue
} }
f.reqs = append(f.reqs, &request{ f.reqs = append(f.reqs, &request{
Username: gist.Owner.Login, Avatar: avatar,
Account: address, Account: address,
Time: time.Now(), Time: time.Now(),
Tx: signed, Tx: signed,
}) })
f.timeouts[gist.Owner.Login] = time.Now().Add(time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute) f.timeouts[username] = time.Now().Add(time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute)
fund = true fund = true
} }
f.lock.Unlock() f.lock.Unlock()
// Send an error if too frequent funding, othewise a success // Send an error if too frequent funding, othewise a success
if !fund { if !fund {
websocket.JSON.Send(conn, map[string]string{"error": fmt.Sprintf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))}) if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))); err != nil {
log.Warn("Failed to send funding error to client", "err", err)
return
}
continue continue
} }
websocket.JSON.Send(conn, map[string]string{"success": fmt.Sprintf("Funding request accepted for %s into %s", gist.Owner.Login, address.Hex())}) if err = sendSuccess(conn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
log.Warn("Failed to send funding success to client", "err", err)
return
}
select { select {
case f.update <- struct{}{}: case f.update <- struct{}{}:
default: default:
@ -497,11 +530,31 @@ func (f *faucet) loop() {
select { select {
case head := <-heads: case head := <-heads:
// New chain head arrived, query the current stats and stream to clients // New chain head arrived, query the current stats and stream to clients
balance, _ := f.client.BalanceAt(context.Background(), f.account.Address, nil) var (
balance = new(big.Int).Div(balance, ether) balance *big.Int
nonce uint64
price *big.Int
err error
)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number)
if err == nil {
nonce, err = f.client.NonceAt(ctx, f.account.Address, nil)
if err == nil {
price, err = f.client.SuggestGasPrice(ctx)
}
}
cancel()
price, _ := f.client.SuggestGasPrice(context.Background()) // If querying the data failed, try for the next block
nonce, _ := f.client.NonceAt(context.Background(), f.account.Address, nil) if err != nil {
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
continue
} else {
log.Info("Updated faucet state", "block", head.Number, "hash", head.Hash(), "balance", balance, "nonce", nonce, "price", price)
}
// Faucet state retrieved, update locally and send to clients
balance = new(big.Int).Div(balance, ether)
f.lock.Lock() f.lock.Lock()
f.price, f.nonce = price, nonce f.price, f.nonce = price, nonce
@ -512,17 +565,17 @@ func (f *faucet) loop() {
f.lock.RLock() f.lock.RLock()
for _, conn := range f.conns { for _, conn := range f.conns {
if err := websocket.JSON.Send(conn, map[string]interface{}{ if err := send(conn, map[string]interface{}{
"funds": balance, "funds": balance,
"funded": f.nonce, "funded": f.nonce,
"peers": f.stack.Server().PeerCount(), "peers": f.stack.Server().PeerCount(),
"requests": f.reqs, "requests": f.reqs,
}); err != nil { }, time.Second); err != nil {
log.Warn("Failed to send stats to client", "err", err) log.Warn("Failed to send stats to client", "err", err)
conn.Close() conn.Close()
continue continue
} }
if err := websocket.JSON.Send(conn, head); err != nil { if err := send(conn, head, time.Second); err != nil {
log.Warn("Failed to send header to client", "err", err) log.Warn("Failed to send header to client", "err", err)
conn.Close() conn.Close()
} }
@ -533,7 +586,7 @@ func (f *faucet) loop() {
// Pending requests updated, stream to clients // Pending requests updated, stream to clients
f.lock.RLock() f.lock.RLock()
for _, conn := range f.conns { for _, conn := range f.conns {
if err := websocket.JSON.Send(conn, map[string]interface{}{"requests": f.reqs}); err != nil { if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
log.Warn("Failed to send requests to client", "err", err) log.Warn("Failed to send requests to client", "err", err)
conn.Close() conn.Close()
} }
@ -542,3 +595,184 @@ func (f *faucet) loop() {
} }
} }
} }
// send transmits a JSON-encoded value to the remote end of the websocket,
// bounding the write with a deadline so a stalled client cannot block the
// faucet forever. A zero timeout falls back to a generous 60 second default.
func send(conn *websocket.Conn, value interface{}, timeout time.Duration) error {
	deadline := timeout
	if deadline == 0 {
		deadline = 60 * time.Second
	}
	conn.SetWriteDeadline(time.Now().Add(deadline))
	return websocket.JSON.Send(conn, value)
}
// sendError transmits an error message to the remote end of the websocket,
// using a tight 1 second write deadline to avoid blocking on dead peers.
func sendError(conn *websocket.Conn, err error) error {
	payload := map[string]string{"error": err.Error()}
	return send(conn, payload, time.Second)
}
// sendSuccess transmits a success message to the remote end of the websocket,
// using a tight 1 second write deadline to avoid blocking on dead peers.
func sendSuccess(conn *websocket.Conn, msg string) error {
	payload := map[string]string{"success": msg}
	return send(conn, payload, time.Second)
}
// authGitHub tries to authenticate a faucet request using GitHub gists, returning
// the username, avatar URL and Ethereum address to fund on success.
func authGitHub(url string) (string, string, common.Address, error) {
	// Retrieve the gist from the GitHub Gist APIs
	parts := strings.Split(url, "/")
	req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
	if *githubUser != "" {
		// Authenticated requests get a much higher rate limit from GitHub
		req.SetBasicAuth(*githubUser, *githubToken)
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", "", common.Address{}, err
	}
	// Reject non-200 replies up front: a rate-limit or missing-gist response
	// would otherwise decode into an empty struct below and be misreported as
	// an anonymous gist.
	if res.StatusCode != 200 {
		res.Body.Close()
		return "", "", common.Address{}, fmt.Errorf("Gist lookup failed: %s", res.Status)
	}
	var gist struct {
		Owner struct {
			Login string `json:"login"`
		} `json:"owner"`
		Files map[string]struct {
			Content string `json:"content"`
		} `json:"files"`
	}
	err = json.NewDecoder(res.Body).Decode(&gist)
	res.Body.Close()
	if err != nil {
		return "", "", common.Address{}, err
	}
	if gist.Owner.Login == "" {
		return "", "", common.Address{}, errors.New("Anonymous Gists not allowed")
	}
	// Iterate over all the files and look for Ethereum addresses
	var address common.Address
	for _, file := range gist.Files {
		content := strings.TrimSpace(file.Content)
		if len(content) == 2+common.AddressLength*2 {
			address = common.HexToAddress(content)
		}
	}
	if address == (common.Address{}) {
		return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
	}
	// Validate the user's existence since the API is unhelpful here
	if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
		return "", "", common.Address{}, err
	}
	res.Body.Close()
	if res.StatusCode != 200 {
		return "", "", common.Address{}, errors.New("Invalid user... boom!")
	}
	// Everything passed validation, return the gathered infos
	return gist.Owner.Login + "@github", fmt.Sprintf("https://github.com/%s.png?size=64", gist.Owner.Login), address, nil
}
// authTwitter tries to authenticate a faucet request using Twitter posts, returning
// the username, avatar URL and Ethereum address to fund on success.
func authTwitter(url string) (string, string, common.Address, error) {
	// Ensure the user specified a meaningful URL, no fancy nonsense
	parts := strings.Split(url, "/")
	if len(parts) < 4 || parts[len(parts)-2] != "status" {
		return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
	}
	username := parts[len(parts)-3]

	// Twitter's API isn't really friendly with direct links. Still, we don't
	// want to do ask read permissions from users, so just load the public posts and
	// scrape it for the Ethereum address and profile URL.
	res, err := http.Get(url)
	if err != nil {
		return "", "", common.Address{}, err
	}
	defer res.Body.Close()

	// NOTE(review): this assumes Twitter serves a zlib/deflate compressed body;
	// if the service ever returns plain or gzip content this read fails — verify.
	reader, err := zlib.NewReader(res.Body)
	if err != nil {
		return "", "", common.Address{}, err
	}
	// The zlib reader is a ReadCloser and was previously leaked; release it.
	defer reader.Close()

	body, err := ioutil.ReadAll(reader)
	if err != nil {
		return "", "", common.Address{}, err
	}
	// Scrape the page for the first hex-encoded Ethereum address
	address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
	if address == (common.Address{}) {
		return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
	}
	// Best effort: pull the profile picture out of the embedded markup
	var avatar string
	if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
		avatar = parts[1]
	}
	return username + "@twitter", avatar, address, nil
}
// authGooglePlus tries to authenticate a faucet request using GooglePlus posts,
// returning the username, avatar URL and Ethereum address to fund on success.
func authGooglePlus(url string) (string, string, common.Address, error) {
	// Only accept canonical post URLs of the form .../<user>/posts/<id>
	segments := strings.Split(url, "/")
	if len(segments) < 4 || segments[len(segments)-2] != "posts" {
		return "", "", common.Address{}, errors.New("Invalid Google+ post URL")
	}
	username := segments[len(segments)-3]

	// Google's API is unfriendly towards direct links, and we don't want to ask
	// read permissions from users, so fetch the public post page itself and
	// scrape the Ethereum address and avatar out of the HTML.
	res, err := http.Get(url)
	if err != nil {
		return "", "", common.Address{}, err
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", common.Address{}, err
	}
	// The first 40-nibble hex string on the page is taken as the funding target
	address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
	if address == (common.Address{}) {
		return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
	}
	// Best effort: extract the profile photo for display purposes
	avatar := ""
	if match := regexp.MustCompile("src=\"([^\"]+googleusercontent.com[^\"]+photo.jpg)\"").FindStringSubmatch(string(body)); len(match) == 2 {
		avatar = match[1]
	}
	return username + "@google+", avatar, address, nil
}
// authFacebook tries to authenticate a faucet request using Facebook posts,
// returning the username, avatar URL and Ethereum address to fund on success.
func authFacebook(url string) (string, string, common.Address, error) {
	// Only accept canonical post URLs of the form .../<user>/posts/<id>
	segments := strings.Split(url, "/")
	if len(segments) < 4 || segments[len(segments)-2] != "posts" {
		return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
	}
	username := segments[len(segments)-3]

	// Facebook's Graph API is unfriendly towards direct links, and we don't
	// want to ask read permissions from users, so fetch the public post page
	// itself and scrape the Ethereum address and avatar out of the HTML.
	res, err := http.Get(url)
	if err != nil {
		return "", "", common.Address{}, err
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", common.Address{}, err
	}
	// The first 40-nibble hex string on the page is taken as the funding target
	address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
	if address == (common.Address{}) {
		return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
	}
	// Best effort: extract the profile photo for display purposes
	avatar := ""
	if match := regexp.MustCompile("src=\"([^\"]+fbcdn.net[^\"]+)\"").FindStringSubmatch(string(body)); len(match) == 2 {
		avatar = match[1]
	}
	return username + "@facebook", avatar, address, nil
}

View File

@ -5,7 +5,7 @@
<meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<title>{{.Network}}: GitHub Faucet</title> <title>{{.Network}}: Authenticated Faucet</title>
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" /> <link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" />
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" /> <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" />
@ -43,13 +43,13 @@
<div class="container"> <div class="container">
<div class="row" style="margin-bottom: 16px;"> <div class="row" style="margin-bottom: 16px;">
<div class="col-lg-12"> <div class="col-lg-12">
<h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} GitHub Authenticated Faucet <i class="fa fa-github-alt" aria-hidden="true"></i></h1> <h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} Authenticated Faucet</h1>
</div> </div>
</div> </div>
<div class="row"> <div class="row">
<div class="col-lg-8 col-lg-offset-2"> <div class="col-lg-8 col-lg-offset-2">
<div class="input-group"> <div class="input-group">
<input id="gist" type="text" class="form-control" placeholder="GitHub Gist URL containing your Ethereum address..."> <input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address...">
<span class="input-group-btn"> <span class="input-group-btn">
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button> <button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}} <ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
@ -80,8 +80,21 @@
<div class="row" style="margin-top: 32px;"> <div class="row" style="margin-top: 32px;">
<div class="col-lg-12"> <div class="col-lg-12">
<h3>How does this work?</h3> <h3>How does this work?</h3>
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to GitHub accounts. Anyone having a GitHub account may request funds within the permitted limits.</p> <p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to certain common 3rd party accounts. Anyone having a GitHub, Twitter, Google+ or Facebook account may request funds within the permitted limits.</p>
<p>To request funds, simply create a <a href="https://gist.github.com/" target="_about:blank">GitHub Gist</a> with your Ethereum address pasted into the contents (the file name doesn't matter), copy paste the gists URL into the above input box and fire away! You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p> <dl class="dl-horizontal">
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-github-alt" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via GitHub, create a <a href="https://gist.github.com/" target="_about:blank">gist</a> with your Ethereum address embedded into the content (the file name doesn't matter).<br/>Copy-paste the gists URL into the above input box and fire away!</dd>
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-twitter" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Twitter, make a <a href="https://twitter.com/intent/tweet?text=Requesting%20faucet%20funds%20into%200x0000000000000000000000000000000000000000%20on%20the%20%23{{.Network}}%20%23Ethereum%20test%20network." target="_about:blank">tweet</a> with your Ethereum address pasted into the contents (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://support.twitter.com/articles/80586" target="_about:blank">tweets URL</a> into the above input box and fire away!</dd>
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-google-plus-official" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Google Plus, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the posts URL into the above input box and fire away!</dd>
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-facebook" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Facebook, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://www.facebook.com/help/community/question/?id=282662498552845" target="_about:blank">posts URL</a> into the above input box and fire away!</dd>
</dl>
<p>You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}} {{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
</div> </div>
</div> </div>
@ -93,10 +106,22 @@
var attempt = 0; var attempt = 0;
var server; var server;
var tier = 0; var tier = 0;
var requests = [];
// Define a function that creates closures to drop old requests
var dropper = function(hash) {
return function() {
for (var i=0; i<requests.length; i++) {
if (requests[i].tx.hash == hash) {
requests.splice(i, 1);
break;
}
}
}
};
// Define the function that submits a gist url to the server // Define the function that submits a gist url to the server
var submit = function({{if .Recaptcha}}captcha{{end}}) { var submit = function({{if .Recaptcha}}captcha{{end}}) {
server.send(JSON.stringify({url: $("#gist")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}} server.send(JSON.stringify({url: $("#url")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
grecaptcha.reset();{{end}} grecaptcha.reset();{{end}}
}; };
// Define a method to reconnect upon server loss // Define a method to reconnect upon server loss
@ -127,21 +152,85 @@
$("#block").text(parseInt(msg.number, 16)); $("#block").text(parseInt(msg.number, 16));
} }
if (msg.error !== undefined) { if (msg.error !== undefined) {
noty({layout: 'topCenter', text: msg.error, type: 'error'}); noty({layout: 'topCenter', text: msg.error, type: 'error', timeout: 5000, progressBar: true});
} }
if (msg.success !== undefined) { if (msg.success !== undefined) {
noty({layout: 'topCenter', text: msg.success, type: 'success'}); noty({layout: 'topCenter', text: msg.success, type: 'success', timeout: 5000, progressBar: true});
} }
if (msg.requests !== undefined && msg.requests !== null) { if (msg.requests !== undefined && msg.requests !== null) {
// Mark all previous requests missing as done
for (var i=0; i<requests.length; i++) {
if (msg.requests.length > 0 && msg.requests[0].tx.hash == requests[i].tx.hash) {
break;
}
if (requests[i].time != "") {
requests[i].time = "";
setTimeout(dropper(requests[i].tx.hash), 3000);
}
}
// Append any new requests into our local collection
var common = -1;
if (requests.length > 0) {
for (var i=0; i<msg.requests.length; i++) {
if (requests[requests.length-1].tx.hash == msg.requests[i].tx.hash) {
common = i;
break;
}
}
}
for (var i=common+1; i<msg.requests.length; i++) {
requests.push(msg.requests[i]);
}
// Iterate over our entire local collection and re-render the funding table
var content = ""; var content = "";
for (var i=0; i<msg.requests.length; i++) { for (var i=0; i<requests.length; i++) {
content += "<tr><td><div style=\"background: url('https://github.com/" + msg.requests[i].username + ".png?size=64'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td><td><pre>" + msg.requests[i].account + "</pre></td><td style=\"width: 100%; text-align: center; vertical-align: middle;\">" + moment.duration(moment(msg.requests[i].time).unix()-moment().unix(), 'seconds').humanize(true) + "</td></tr>"; var done = requests[i].time == "";
var elapsed = moment().unix()-moment(requests[i].time).unix();
content += "<tr id='" + requests[i].tx.hash + "'>";
content += " <td><div style=\"background: url('" + requests[i].avatar + "'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td>";
content += " <td><pre>" + requests[i].account + "</pre></td>";
content += " <td style=\"width: 100%; text-align: center; vertical-align: middle;\">";
if (done) {
content += " funded";
} else {
content += " <span id='time-" + i + "' class='timer'>" + moment.duration(-elapsed, 'seconds').humanize(true) + "</span>";
}
content += " <div class='progress' style='height: 4px; margin: 0;'>";
if (done) {
content += " <div class='progress-bar progress-bar-success' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
} else if (elapsed > 30) {
content += " <div class='progress-bar progress-bar-danger progress-bar-striped active' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
} else {
content += " <div class='progress-bar progress-bar-striped active' role='progressbar' aria-valuenow='" + elapsed + "' style='width:" + (elapsed * 100 / 30) + "%;'></div>";
}
content += " </div>";
content += " </td>";
content += "</tr>";
} }
$("#requests").html("<tbody>" + content + "</tbody>"); $("#requests").html("<tbody>" + content + "</tbody>");
} }
} }
server.onclose = function() { setTimeout(reconnect, 3000); }; server.onclose = function() { setTimeout(reconnect, 3000); };
} }
// Start a UI updater to push the progress bars forward until they are done
setInterval(function() {
$('.progress-bar').each(function() {
var progress = Number($(this).attr('aria-valuenow')) + 1;
if (progress < 30) {
$(this).attr('aria-valuenow', progress);
$(this).css('width', (progress * 100 / 30) + '%');
} else if (progress == 30) {
$(this).css('width', '100%');
$(this).addClass("progress-bar-danger");
}
})
$('.timer').each(function() {
var index = Number($(this).attr('id').substring(5));
$(this).html(moment.duration(moment(requests[index].time).unix()-moment().unix(), 'seconds').humanize(true));
})
}, 1000);
// Establish a websocket connection to the API server // Establish a websocket connection to the API server
reconnect(); reconnect();
</script>{{if .Recaptcha}} </script>{{if .Recaptcha}}

File diff suppressed because one or more lines are too long

View File

@ -291,15 +291,28 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr
// accountCreate creates a new account into the keystore defined by the CLI flags. // accountCreate creates a new account into the keystore defined by the CLI flags.
func accountCreate(ctx *cli.Context) error { func accountCreate(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx) cfg := gethConfig{Node: defaultNodeConfig()}
// Load config file.
if file := ctx.GlobalString(configFileFlag.Name); file != "" {
if err := loadConfig(file, &cfg); err != nil {
utils.Fatalf("%v", err)
}
}
utils.SetNodeConfig(ctx, &cfg.Node)
scryptN, scryptP, keydir, err := cfg.Node.AccountConfig()
if err != nil {
utils.Fatalf("Failed to read configuration: %v", err)
}
password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) address, err := keystore.StoreKey(keydir, password, scryptN, scryptP)
account, err := ks.NewAccount(password)
if err != nil { if err != nil {
utils.Fatalf("Failed to create account: %v", err) utils.Fatalf("Failed to create account: %v", err)
} }
fmt.Printf("Address: {%x}\n", account.Address) fmt.Printf("Address: {%x}\n", address)
return nil return nil
} }

View File

@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/contracts/release" "github.com/ethereum/go-ethereum/contracts/release"
"github.com/ethereum/go-ethereum/dashboard"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -76,10 +77,11 @@ type ethstatsConfig struct {
} }
type gethConfig struct { type gethConfig struct {
Eth eth.Config Eth eth.Config
Shh whisper.Config Shh whisper.Config
Node node.Config Node node.Config
Ethstats ethstatsConfig Ethstats ethstatsConfig
Dashboard dashboard.Config
} }
func loadConfig(file string, cfg *gethConfig) error { func loadConfig(file string, cfg *gethConfig) error {
@ -110,9 +112,10 @@ func defaultNodeConfig() node.Config {
func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
// Load defaults. // Load defaults.
cfg := gethConfig{ cfg := gethConfig{
Eth: eth.DefaultConfig, Eth: eth.DefaultConfig,
Shh: whisper.DefaultConfig, Shh: whisper.DefaultConfig,
Node: defaultNodeConfig(), Node: defaultNodeConfig(),
Dashboard: dashboard.DefaultConfig,
} }
// Load config file. // Load config file.
@ -134,6 +137,7 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
} }
utils.SetShhConfig(ctx, stack, &cfg.Shh) utils.SetShhConfig(ctx, stack, &cfg.Shh)
utils.SetDashboardConfig(ctx, &cfg.Dashboard)
return stack, cfg return stack, cfg
} }
@ -153,9 +157,12 @@ func makeFullNode(ctx *cli.Context) *node.Node {
utils.RegisterEthService(stack, &cfg.Eth) utils.RegisterEthService(stack, &cfg.Eth)
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
utils.RegisterDashboardService(stack, &cfg.Dashboard)
}
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode // Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
shhEnabled := enableWhisper(ctx) shhEnabled := enableWhisper(ctx)
shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DevModeFlag.Name) shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DeveloperFlag.Name)
if shhEnabled || shhAutoEnabled { if shhEnabled || shhAutoEnabled {
if ctx.GlobalIsSet(utils.WhisperMaxMessageSizeFlag.Name) { if ctx.GlobalIsSet(utils.WhisperMaxMessageSizeFlag.Name) {
cfg.Shh.MaxMessageSize = uint32(ctx.Int(utils.WhisperMaxMessageSizeFlag.Name)) cfg.Shh.MaxMessageSize = uint32(ctx.Int(utils.WhisperMaxMessageSizeFlag.Name))

View File

@ -61,6 +61,11 @@ var (
utils.DataDirFlag, utils.DataDirFlag,
utils.KeyStoreDirFlag, utils.KeyStoreDirFlag,
utils.NoUSBFlag, utils.NoUSBFlag,
utils.DashboardEnabledFlag,
utils.DashboardAddrFlag,
utils.DashboardPortFlag,
utils.DashboardRefreshFlag,
utils.DashboardAssetsFlag,
utils.EthashCacheDirFlag, utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag, utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag, utils.EthashCachesOnDiskFlag,
@ -99,7 +104,8 @@ var (
utils.NetrestrictFlag, utils.NetrestrictFlag,
utils.NodeKeyFileFlag, utils.NodeKeyFileFlag,
utils.NodeKeyHexFlag, utils.NodeKeyHexFlag,
utils.DevModeFlag, utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
utils.TestnetFlag, utils.TestnetFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.VMEnableDebugFlag, utils.VMEnableDebugFlag,
@ -270,7 +276,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
} }
}() }()
// Start auxiliary services if enabled // Start auxiliary services if enabled
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) { if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) {
// Mining only makes sense if a full Ethereum node is running // Mining only makes sense if a full Ethereum node is running
var ethereum *eth.Ethereum var ethereum *eth.Ethereum
if err := stack.Service(&ethereum); err != nil { if err := stack.Service(&ethereum); err != nil {

View File

@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
"strings"
) )
// AppHelpTemplate is the test template for the default, global app help topic. // AppHelpTemplate is the test template for the default, global app help topic.
@ -72,7 +73,6 @@ var AppHelpFlagGroups = []flagGroup{
utils.NetworkIdFlag, utils.NetworkIdFlag,
utils.TestnetFlag, utils.TestnetFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.DevModeFlag,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.EthStatsURLFlag, utils.EthStatsURLFlag,
utils.IdentityFlag, utils.IdentityFlag,
@ -81,6 +81,12 @@ var AppHelpFlagGroups = []flagGroup{
utils.LightKDFFlag, utils.LightKDFFlag,
}, },
}, },
{Name: "DEVELOPER CHAIN",
Flags: []cli.Flag{
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
},
},
{ {
Name: "ETHASH", Name: "ETHASH",
Flags: []cli.Flag{ Flags: []cli.Flag{
@ -92,6 +98,16 @@ var AppHelpFlagGroups = []flagGroup{
utils.EthashDatasetsOnDiskFlag, utils.EthashDatasetsOnDiskFlag,
}, },
}, },
//{
// Name: "DASHBOARD",
// Flags: []cli.Flag{
// utils.DashboardEnabledFlag,
// utils.DashboardAddrFlag,
// utils.DashboardPortFlag,
// utils.DashboardRefreshFlag,
// utils.DashboardAssetsFlag,
// },
//},
{ {
Name: "TRANSACTION POOL", Name: "TRANSACTION POOL",
Flags: []cli.Flag{ Flags: []cli.Flag{
@ -263,6 +279,9 @@ func init() {
uncategorized := []cli.Flag{} uncategorized := []cli.Flag{}
for _, flag := range data.(*cli.App).Flags { for _, flag := range data.(*cli.App).Flags {
if _, ok := categorized[flag.String()]; !ok { if _, ok := categorized[flag.String()]; !ok {
if strings.HasPrefix(flag.GetName(), "dashboard") {
continue
}
uncategorized = append(uncategorized, flag) uncategorized = append(uncategorized, flag)
} }
} }

View File

@ -133,7 +133,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
}) })
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes() files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
files[filepath.Join(workdir, "genesis.json")] = []byte(config.node.genesis) files[filepath.Join(workdir, "genesis.json")] = config.node.genesis
files[filepath.Join(workdir, "account.json")] = []byte(config.node.keyJSON) files[filepath.Join(workdir, "account.json")] = []byte(config.node.keyJSON)
files[filepath.Join(workdir, "account.pass")] = []byte(config.node.keyPass) files[filepath.Join(workdir, "account.pass")] = []byte(config.node.keyPass)

View File

@ -128,7 +128,7 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes() files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
//genesisfile, _ := json.MarshalIndent(config.genesis, "", " ") //genesisfile, _ := json.MarshalIndent(config.genesis, "", " ")
files[filepath.Join(workdir, "genesis.json")] = []byte(config.genesis) files[filepath.Join(workdir, "genesis.json")] = config.genesis
if config.keyJSON != "" { if config.keyJSON != "" {
files[filepath.Join(workdir, "signer.json")] = []byte(config.keyJSON) files[filepath.Join(workdir, "signer.json")] = []byte(config.keyJSON)

View File

@ -27,7 +27,6 @@ import (
"os/user" "os/user"
"path/filepath" "path/filepath"
"strings" "strings"
"syscall"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh"
@ -78,14 +77,25 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
} else { } else {
key, err := ssh.ParsePrivateKey(buf) key, err := ssh.ParsePrivateKey(buf)
if err != nil { if err != nil {
log.Warn("Bad SSH key, falling back to passwords", "path", path, "err", err) fmt.Printf("What's the decryption password for %s? (won't be echoed)\n>", path)
blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
fmt.Println()
if err != nil {
log.Warn("Couldn't read password", "err", err)
}
key, err := ssh.ParsePrivateKeyWithPassphrase(buf, blob)
if err != nil {
log.Warn("Failed to decrypt SSH key, falling back to passwords", "path", path, "err", err)
} else {
auths = append(auths, ssh.PublicKeys(key))
}
} else { } else {
auths = append(auths, ssh.PublicKeys(key)) auths = append(auths, ssh.PublicKeys(key))
} }
} }
auths = append(auths, ssh.PasswordCallback(func() (string, error) { auths = append(auths, ssh.PasswordCallback(func() (string, error) {
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server) fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
blob, err := terminal.ReadPassword(int(syscall.Stdin)) blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
fmt.Println() fmt.Println()
return string(blob), err return string(blob), err

View File

@ -28,7 +28,6 @@ import (
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"syscall"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -231,7 +230,7 @@ func (w *wizard) readDefaultFloat(def float64) float64 {
// line and returns it. The input will not be echoed. // line and returns it. The input will not be echoed.
func (w *wizard) readPassword() string { func (w *wizard) readPassword() string {
fmt.Printf("> ") fmt.Printf("> ")
text, err := terminal.ReadPassword(int(syscall.Stdin)) text, err := terminal.ReadPassword(int(os.Stdin.Fd()))
if err != nil { if err != nil {
log.Crit("Failed to read password", "err", err) log.Crit("Failed to read password", "err", err)
} }

View File

@ -71,7 +71,7 @@ func (w *wizard) makeServer() string {
fmt.Println() fmt.Println()
fmt.Println("Please enter remote server's address:") fmt.Println("Please enter remote server's address:")
// Read and fial the server to ensure docker is present // Read and dial the server to ensure docker is present
input := w.readString() input := w.readString()
client, err := dial(input, nil) client, err := dial(input, nil)

View File

@ -51,7 +51,7 @@ func main() {
var r io.Reader var r io.Reader
switch { switch {
case *hexMode != "": case *hexMode != "":
data, err := hex.DecodeString(*hexMode) data, err := hex.DecodeString(strings.TrimPrefix(*hexMode, "0x"))
if err != nil { if err != nil {
die(err) die(err)
} }

View File

@ -38,6 +38,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/dashboard"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/eth/gasprice"
@ -137,9 +138,13 @@ var (
Name: "rinkeby", Name: "rinkeby",
Usage: "Rinkeby network: pre-configured proof-of-authority test network", Usage: "Rinkeby network: pre-configured proof-of-authority test network",
} }
DevModeFlag = cli.BoolFlag{ DeveloperFlag = cli.BoolFlag{
Name: "dev", Name: "dev",
Usage: "Developer mode: pre-configured private network with several debugging flags", Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
}
DeveloperPeriodFlag = cli.IntFlag{
Name: "dev.period",
Usage: "Block period to use in developer mode (0 = mine only if transaction pending)",
} }
IdentityFlag = cli.StringFlag{ IdentityFlag = cli.StringFlag{
Name: "identity", Name: "identity",
@ -179,6 +184,31 @@ var (
Name: "lightkdf", Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength", Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
} }
// Dashboard settings
DashboardEnabledFlag = cli.BoolFlag{
Name: "dashboard",
Usage: "Enable the dashboard",
}
DashboardAddrFlag = cli.StringFlag{
Name: "dashboard.addr",
Usage: "Dashboard listening interface",
Value: dashboard.DefaultConfig.Host,
}
DashboardPortFlag = cli.IntFlag{
Name: "dashboard.host",
Usage: "Dashboard listening port",
Value: dashboard.DefaultConfig.Port,
}
DashboardRefreshFlag = cli.DurationFlag{
Name: "dashboard.refresh",
Usage: "Dashboard metrics collection refresh rate",
Value: dashboard.DefaultConfig.Refresh,
}
DashboardAssetsFlag = cli.StringFlag{
Name: "dashboard.assets",
Usage: "Developer flag to serve the dashboard from the local file system",
Value: dashboard.DefaultConfig.Assets,
}
// Ethash settings // Ethash settings
EthashCacheDirFlag = DirectoryFlag{ EthashCacheDirFlag = DirectoryFlag{
Name: "ethash.cachedir", Name: "ethash.cachedir",
@ -796,7 +826,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
cfg.NetRestrict = list cfg.NetRestrict = list
} }
if ctx.GlobalBool(DevModeFlag.Name) { if ctx.GlobalBool(DeveloperFlag.Name) {
// --dev mode can't use p2p networking. // --dev mode can't use p2p networking.
cfg.MaxPeers = 0 cfg.MaxPeers = 0
cfg.ListenAddr = ":0" cfg.ListenAddr = ":0"
@ -817,8 +847,8 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
switch { switch {
case ctx.GlobalIsSet(DataDirFlag.Name): case ctx.GlobalIsSet(DataDirFlag.Name):
cfg.DataDir = ctx.GlobalString(DataDirFlag.Name) cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
case ctx.GlobalBool(DevModeFlag.Name): case ctx.GlobalBool(DeveloperFlag.Name):
cfg.DataDir = filepath.Join(os.TempDir(), "ethereum_dev_mode") cfg.DataDir = "" // unless explicitly requested, use memory databases
case ctx.GlobalBool(TestnetFlag.Name): case ctx.GlobalBool(TestnetFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet") cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
case ctx.GlobalBool(RinkebyFlag.Name): case ctx.GlobalBool(RinkebyFlag.Name):
@ -924,7 +954,7 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
// SetEthConfig applies eth-related command line flags to the config. // SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
// Avoid conflicting network flags // Avoid conflicting network flags
checkExclusive(ctx, DevModeFlag, TestnetFlag, RinkebyFlag) checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag)
checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag) checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag)
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
@ -985,20 +1015,44 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
cfg.NetworkId = 4 cfg.NetworkId = 4
} }
cfg.Genesis = core.DefaultRinkebyGenesisBlock() cfg.Genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(DevModeFlag.Name): case ctx.GlobalBool(DeveloperFlag.Name):
cfg.Genesis = core.DevGenesisBlock() // Create new developer account or reuse existing one
if !ctx.GlobalIsSet(GasPriceFlag.Name) { var (
cfg.GasPrice = new(big.Int) developer accounts.Account
err error
)
if accs := ks.Accounts(); len(accs) > 0 {
developer = ks.Accounts()[0]
} else {
developer, err = ks.NewAccount("")
if err != nil {
Fatalf("Failed to create developer account: %v", err)
}
} }
cfg.PowTest = true if err := ks.Unlock(developer, ""); err != nil {
} Fatalf("Failed to unlock developer account: %v", err)
}
log.Info("Using developer account", "address", developer.Address)
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
if !ctx.GlobalIsSet(GasPriceFlag.Name) {
cfg.GasPrice = big.NewInt(1)
}
}
// TODO(fjl): move trie cache generations into config // TODO(fjl): move trie cache generations into config
if gen := ctx.GlobalInt(TrieCacheGenFlag.Name); gen > 0 { if gen := ctx.GlobalInt(TrieCacheGenFlag.Name); gen > 0 {
state.MaxTrieCacheGen = uint16(gen) state.MaxTrieCacheGen = uint16(gen)
} }
} }
// SetDashboardConfig applies dashboard related command line flags to the config.
func SetDashboardConfig(ctx *cli.Context, cfg *dashboard.Config) {
cfg.Host = ctx.GlobalString(DashboardAddrFlag.Name)
cfg.Port = ctx.GlobalInt(DashboardPortFlag.Name)
cfg.Refresh = ctx.GlobalDuration(DashboardRefreshFlag.Name)
cfg.Assets = ctx.GlobalString(DashboardAssetsFlag.Name)
}
// RegisterEthService adds an Ethereum client to the stack. // RegisterEthService adds an Ethereum client to the stack.
func RegisterEthService(stack *node.Node, cfg *eth.Config) { func RegisterEthService(stack *node.Node, cfg *eth.Config) {
var err error var err error
@ -1021,6 +1075,13 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
} }
} }
// RegisterDashboardService adds a dashboard to the stack.
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config) {
stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return dashboard.New(cfg)
})
}
// RegisterShhService configures Whisper and adds it to the given node. // RegisterShhService configures Whisper and adds it to the given node.
func RegisterShhService(stack *node.Node, cfg *whisper.Config) { func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) { if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) {
@ -1077,8 +1138,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
genesis = core.DefaultTestnetGenesisBlock() genesis = core.DefaultTestnetGenesisBlock()
case ctx.GlobalBool(RinkebyFlag.Name): case ctx.GlobalBool(RinkebyFlag.Name):
genesis = core.DefaultRinkebyGenesisBlock() genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(DevModeFlag.Name): case ctx.GlobalBool(DeveloperFlag.Name):
genesis = core.DevGenesisBlock() Fatalf("Developer chains are ephemeral")
} }
return genesis return genesis
} }

View File

@ -53,9 +53,7 @@ var (
type decError struct{ msg string } type decError struct{ msg string }
func (err decError) Error() string { func (err decError) Error() string { return err.msg }
return string(err.msg)
}
// Decode decodes a hex string with 0x prefix. // Decode decodes a hex string with 0x prefix.
func Decode(input string) ([]byte, error) { func Decode(input string) ([]byte, error) {

View File

@ -223,7 +223,7 @@ func (b *Uint64) UnmarshalText(input []byte) error {
return ErrSyntax return ErrSyntax
} }
dec *= 16 dec *= 16
dec += uint64(nib) dec += nib
} }
*b = Uint64(dec) *b = Uint64(dec)
return nil return nil

View File

@ -125,6 +125,11 @@ var (
// errUnauthorized is returned if a header is signed by a non-authorized entity. // errUnauthorized is returned if a header is signed by a non-authorized entity.
errUnauthorized = errors.New("unauthorized") errUnauthorized = errors.New("unauthorized")
// errWaitTransactions is returned if an empty block is attempted to be sealed
// on an instant chain (0 second period). It's important to refuse these as the
// block reward is zero, so an empty block just bloats the chain... fast.
errWaitTransactions = errors.New("waiting for transactions")
) )
// SignerFn is a signer callback function to request a hash to be signed by a // SignerFn is a signer callback function to request a hash to be signed by a
@ -211,9 +216,6 @@ func New(config *params.CliqueConfig, db ethdb.Database) *Clique {
if conf.Epoch == 0 { if conf.Epoch == 0 {
conf.Epoch = epochLength conf.Epoch = epochLength
} }
if conf.Period == 0 {
conf.Period = blockPeriod
}
// Allocate the snapshot caches and create the engine // Allocate the snapshot caches and create the engine
recents, _ := lru.NewARC(inmemorySnapshots) recents, _ := lru.NewARC(inmemorySnapshots)
signatures, _ := lru.NewARC(inmemorySignatures) signatures, _ := lru.NewARC(inmemorySignatures)
@ -599,6 +601,10 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-ch
if number == 0 { if number == 0 {
return nil, errUnknownBlock return nil, errUnknownBlock
} }
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 {
return nil, errWaitTransactions
}
// Don't hold the signer fields for the entire sealing procedure // Don't hold the signer fields for the entire sealing procedure
c.lock.RLock() c.lock.RLock()
signer, signFn := c.signer, c.signFn signer, signFn := c.signer, c.signFn

View File

@ -31,7 +31,7 @@ func cacheSize(block uint64) uint64 {
return cacheSizes[epoch] return cacheSizes[epoch]
} }
// No known cache size, calculate manually (sanity branch only) // No known cache size, calculate manually (sanity branch only)
size := uint64(cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes) size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64 for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * hashBytes size -= 2 * hashBytes
} }
@ -49,7 +49,7 @@ func datasetSize(block uint64) uint64 {
return datasetSizes[epoch] return datasetSizes[epoch]
} }
// No known dataset size, calculate manually (sanity branch only) // No known dataset size, calculate manually (sanity branch only)
size := uint64(datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes) size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64 for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * mixBytes size -= 2 * mixBytes
} }

View File

@ -1,7 +1,9 @@
import "mortal"; pragma solidity ^0.4.18;
import "https://github.com/ethereum/solidity/std/mortal.sol";
/// @title Chequebook for Ethereum micropayments /// @title Chequebook for Ethereum micropayments
/// @author Daniel A. Nagy <daniel@ethdev.com> /// @author Daniel A. Nagy <daniel@ethereum.org>
contract chequebook is mortal { contract chequebook is mortal {
// Cumulative paid amount in wei to each beneficiary // Cumulative paid amount in wei to each beneficiary
mapping (address => uint256) public sent; mapping (address => uint256) public sent;
@ -21,26 +23,23 @@ contract chequebook is mortal {
uint8 sig_v, bytes32 sig_r, bytes32 sig_s) { uint8 sig_v, bytes32 sig_r, bytes32 sig_s) {
// Check if the cheque is old. // Check if the cheque is old.
// Only cheques that are more recent than the last cashed one are considered. // Only cheques that are more recent than the last cashed one are considered.
if(amount <= sent[beneficiary]) return; require(amount > sent[beneficiary]);
// Check the digital signature of the cheque. // Check the digital signature of the cheque.
bytes32 hash = sha3(address(this), beneficiary, amount); bytes32 hash = keccak256(address(this), beneficiary, amount);
if(owner != ecrecover(hash, sig_v, sig_r, sig_s)) return; require(owner == ecrecover(hash, sig_v, sig_r, sig_s));
// Attempt sending the difference between the cumulative amount on the cheque // Attempt sending the difference between the cumulative amount on the cheque
// and the cumulative amount on the last cashed cheque to beneficiary. // and the cumulative amount on the last cashed cheque to beneficiary.
uint256 diff = amount - sent[beneficiary]; uint256 diff = amount - sent[beneficiary];
if (diff <= this.balance) { if (diff <= this.balance) {
// update the cumulative amount before sending // update the cumulative amount before sending
sent[beneficiary] = amount; sent[beneficiary] = amount;
if (!beneficiary.send(diff)) { beneficiary.transfer(diff);
// Upon failure to execute send, revert everything
throw;
}
} else { } else {
// Upon failure, punish owner for writing a bounced cheque. // Upon failure, punish owner for writing a bounced cheque.
// owner.sendToDebtorsPrison(); // owner.sendToDebtorsPrison();
Overdraft(owner); Overdraft(owner);
// Compensate beneficiary. // Compensate beneficiary.
suicide(beneficiary); selfdestruct(beneficiary);
} }
} }
} }

View File

@ -818,7 +818,12 @@ func (bc *BlockChain) WriteBlockAndState(block *types.Block, receipts []*types.R
// If the total difficulty is higher than our known, add it to the canonical chain // If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining. // Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) { reorg := externTd.Cmp(localTd) > 0
if !reorg && externTd.Cmp(localTd) == 0 {
// Split same-difficulty blocks by number, then at random
reorg = block.NumberU64() < bc.currentBlock.NumberU64() || (block.NumberU64() == bc.currentBlock.NumberU64() && mrand.Float64() < 0.5)
}
if reorg {
// Reorganise the chain if the parent is not the head block // Reorganise the chain if the parent is not the head block
if block.ParentHash() != bc.currentBlock.Hash() { if block.ParentHash() != bc.currentBlock.Hash() {
if err := bc.reorg(bc.currentBlock, block); err != nil { if err := bc.reorg(bc.currentBlock, block); err != nil {

View File

@ -18,6 +18,7 @@ package bloombits
import ( import (
"bytes" "bytes"
"context"
"errors" "errors"
"math" "math"
"sort" "sort"
@ -56,10 +57,16 @@ type partialMatches struct {
// Retrieval represents a request for retrieval task assignments for a given // Retrieval represents a request for retrieval task assignments for a given
// bit with the given number of fetch elements, or a response for such a request. // bit with the given number of fetch elements, or a response for such a request.
// It can also have the actual results set to be used as a delivery data struct. // It can also have the actual results set to be used as a delivery data struct.
//
// The contest and error fields are used by the light client to terminate matching
// early if an error is enountered on some path of the pipeline.
type Retrieval struct { type Retrieval struct {
Bit uint Bit uint
Sections []uint64 Sections []uint64
Bitsets [][]byte Bitsets [][]byte
Context context.Context
Error error
} }
// Matcher is a pipelined system of schedulers and logic matchers which perform // Matcher is a pipelined system of schedulers and logic matchers which perform
@ -137,7 +144,7 @@ func (m *Matcher) addScheduler(idx uint) {
// Start starts the matching process and returns a stream of bloom matches in // Start starts the matching process and returns a stream of bloom matches in
// a given range of blocks. If there are no more matches in the range, the result // a given range of blocks. If there are no more matches in the range, the result
// channel is closed. // channel is closed.
func (m *Matcher) Start(begin, end uint64, results chan uint64) (*MatcherSession, error) { func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) {
// Make sure we're not creating concurrent sessions // Make sure we're not creating concurrent sessions
if atomic.SwapUint32(&m.running, 1) == 1 { if atomic.SwapUint32(&m.running, 1) == 1 {
return nil, errors.New("matcher already running") return nil, errors.New("matcher already running")
@ -149,6 +156,7 @@ func (m *Matcher) Start(begin, end uint64, results chan uint64) (*MatcherSession
matcher: m, matcher: m,
quit: make(chan struct{}), quit: make(chan struct{}),
kill: make(chan struct{}), kill: make(chan struct{}),
ctx: ctx,
} }
for _, scheduler := range m.schedulers { for _, scheduler := range m.schedulers {
scheduler.reset() scheduler.reset()
@ -184,10 +192,12 @@ func (m *Matcher) Start(begin, end uint64, results chan uint64) (*MatcherSession
} }
// Iterate over all the blocks in the section and return the matching ones // Iterate over all the blocks in the section and return the matching ones
for i := first; i <= last; i++ { for i := first; i <= last; i++ {
// Skip the entire byte if no matches are found inside // Skip the entire byte if no matches are found inside (and we're processing an entire byte!)
next := res.bitset[(i-sectionStart)/8] next := res.bitset[(i-sectionStart)/8]
if next == 0 { if next == 0 {
i += 7 if i%8 == 0 {
i += 7
}
continue continue
} }
// Some bit it set, do the actual submatching // Some bit it set, do the actual submatching
@ -502,25 +512,34 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
type MatcherSession struct { type MatcherSession struct {
matcher *Matcher matcher *Matcher
quit chan struct{} // Quit channel to request pipeline termination closer sync.Once // Sync object to ensure we only ever close once
kill chan struct{} // Term channel to signal non-graceful forced shutdown quit chan struct{} // Quit channel to request pipeline termination
kill chan struct{} // Term channel to signal non-graceful forced shutdown
ctx context.Context // Context used by the light client to abort filtering
err atomic.Value // Global error to track retrieval failures deep in the chain
pend sync.WaitGroup pend sync.WaitGroup
} }
// Close stops the matching process and waits for all subprocesses to terminate // Close stops the matching process and waits for all subprocesses to terminate
// before returning. The timeout may be used for graceful shutdown, allowing the // before returning. The timeout may be used for graceful shutdown, allowing the
// currently running retrievals to complete before this time. // currently running retrievals to complete before this time.
func (s *MatcherSession) Close(timeout time.Duration) { func (s *MatcherSession) Close() {
// Bail out if the matcher is not running s.closer.Do(func() {
select { // Signal termination and wait for all goroutines to tear down
case <-s.quit: close(s.quit)
return time.AfterFunc(time.Second, func() { close(s.kill) })
default: s.pend.Wait()
})
}
// Error returns any failure encountered during the matching session.
func (s *MatcherSession) Error() error {
if err := s.err.Load(); err != nil {
return err.(error)
} }
// Signal termination and wait for all goroutines to tear down return nil
close(s.quit)
time.AfterFunc(timeout, func() { close(s.kill) })
s.pend.Wait()
} }
// AllocateRetrieval assigns a bloom bit index to a client process that can either // AllocateRetrieval assigns a bloom bit index to a client process that can either
@ -618,9 +637,13 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
case mux <- request: case mux <- request:
// Retrieval accepted, something must arrive before we're aborting // Retrieval accepted, something must arrive before we're aborting
request <- &Retrieval{Bit: bit, Sections: sections} request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx}
result := <-request result := <-request
if result.Error != nil {
s.err.Store(result.Error)
s.Close()
}
s.DeliverSections(result.Bit, result.Sections, result.Bitsets) s.DeliverSections(result.Bit, result.Sections, result.Bitsets)
} }
} }

View File

@ -36,7 +36,7 @@ import (
type ChainIndexerBackend interface { type ChainIndexerBackend interface {
// Reset initiates the processing of a new chain segment, potentially terminating // Reset initiates the processing of a new chain segment, potentially terminating
// any partially completed operations (in case of a reorg). // any partially completed operations (in case of a reorg).
Reset(section uint64) Reset(section uint64, prevHead common.Hash) error
// Process crunches through the next header in the chain segment. The caller // Process crunches through the next header in the chain segment. The caller
// will ensure a sequential order of headers. // will ensure a sequential order of headers.
@ -46,6 +46,15 @@ type ChainIndexerBackend interface {
Commit() error Commit() error
} }
// ChainIndexerChain interface is used for connecting the indexer to a blockchain
type ChainIndexerChain interface {
// CurrentHeader retrieves the latest locally known header.
CurrentHeader() *types.Header
// SubscribeChainEvent subscribes to new head header notifications.
SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription
}
// ChainIndexer does a post-processing job for equally sized sections of the // ChainIndexer does a post-processing job for equally sized sections of the
// canonical chain (like BlooomBits and CHT structures). A ChainIndexer is // canonical chain (like BlooomBits and CHT structures). A ChainIndexer is
// connected to the blockchain through the event system by starting a // connected to the blockchain through the event system by starting a
@ -100,11 +109,27 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
return c return c
} }
// AddKnownSectionHead marks a new section head as known/processed if it is newer
// than the already known best section head
func (c *ChainIndexer) AddKnownSectionHead(section uint64, shead common.Hash) {
c.lock.Lock()
defer c.lock.Unlock()
if section < c.storedSections {
return
}
c.setSectionHead(section, shead)
c.setValidSections(section + 1)
}
// Start creates a goroutine to feed chain head events into the indexer for // Start creates a goroutine to feed chain head events into the indexer for
// cascading background processing. Children do not need to be started, they // cascading background processing. Children do not need to be started, they
// are notified about new events by their parents. // are notified about new events by their parents.
func (c *ChainIndexer) Start(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) { func (c *ChainIndexer) Start(chain ChainIndexerChain) {
go c.eventLoop(currentHeader, chainEventer) events := make(chan ChainEvent, 10)
sub := chain.SubscribeChainEvent(events)
go c.eventLoop(chain.CurrentHeader(), events, sub)
} }
// Close tears down all goroutines belonging to the indexer and returns any error // Close tears down all goroutines belonging to the indexer and returns any error
@ -147,12 +172,10 @@ func (c *ChainIndexer) Close() error {
// eventLoop is a secondary - optional - event loop of the indexer which is only // eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing // started for the outermost indexer to push chain head events into a processing
// queue. // queue.
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) { func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainEvent, sub event.Subscription) {
// Mark the chain indexer as active, requiring an additional teardown // Mark the chain indexer as active, requiring an additional teardown
atomic.StoreUint32(&c.active, 1) atomic.StoreUint32(&c.active, 1)
events := make(chan ChainEvent, 10)
sub := chainEventer(events)
defer sub.Unsubscribe() defer sub.Unsubscribe()
// Fire the initial new head event to start any outstanding processing // Fire the initial new head event to start any outstanding processing
@ -178,7 +201,11 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, chainEventer func(
} }
header := ev.Block.Header() header := ev.Block.Header()
if header.ParentHash != prevHash { if header.ParentHash != prevHash {
c.newHead(FindCommonAncestor(c.chainDb, prevHeader, header).Number.Uint64(), true) // Reorg to the common ancestor (might not exist in light sync mode, skip reorg then)
// TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
if h := FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
c.newHead(h.Number.Uint64(), true)
}
} }
c.newHead(header.Number.Uint64(), false) c.newHead(header.Number.Uint64(), false)
@ -236,6 +263,7 @@ func (c *ChainIndexer) updateLoop() {
updating bool updating bool
updated time.Time updated time.Time
) )
for { for {
select { select {
case errc := <-c.quit: case errc := <-c.quit:
@ -259,7 +287,7 @@ func (c *ChainIndexer) updateLoop() {
section := c.storedSections section := c.storedSections
var oldHead common.Hash var oldHead common.Hash
if section > 0 { if section > 0 {
oldHead = c.sectionHead(section - 1) oldHead = c.SectionHead(section - 1)
} }
// Process the newly defined section in the background // Process the newly defined section in the background
c.lock.Unlock() c.lock.Unlock()
@ -270,7 +298,7 @@ func (c *ChainIndexer) updateLoop() {
c.lock.Lock() c.lock.Lock()
// If processing succeeded and no reorgs occcurred, mark the section completed // If processing succeeded and no reorgs occcurred, mark the section completed
if err == nil && oldHead == c.sectionHead(section-1) { if err == nil && oldHead == c.SectionHead(section-1) {
c.setSectionHead(section, newHead) c.setSectionHead(section, newHead)
c.setValidSections(section + 1) c.setValidSections(section + 1)
if c.storedSections == c.knownSections && updating { if c.storedSections == c.knownSections && updating {
@ -311,7 +339,11 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
c.log.Trace("Processing new chain section", "section", section) c.log.Trace("Processing new chain section", "section", section)
// Reset and partial processing // Reset and partial processing
c.backend.Reset(section)
if err := c.backend.Reset(section, lastHead); err != nil {
c.setValidSections(0)
return common.Hash{}, err
}
for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ { for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
hash := GetCanonicalHash(c.chainDb, number) hash := GetCanonicalHash(c.chainDb, number)
@ -341,7 +373,7 @@ func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
c.lock.Lock() c.lock.Lock()
defer c.lock.Unlock() defer c.lock.Unlock()
return c.storedSections, c.storedSections*c.sectionSize - 1, c.sectionHead(c.storedSections - 1) return c.storedSections, c.storedSections*c.sectionSize - 1, c.SectionHead(c.storedSections - 1)
} }
// AddChildIndexer adds a child ChainIndexer that can use the output of this one // AddChildIndexer adds a child ChainIndexer that can use the output of this one
@ -381,9 +413,9 @@ func (c *ChainIndexer) setValidSections(sections uint64) {
c.storedSections = sections // needed if new > old c.storedSections = sections // needed if new > old
} }
// sectionHead retrieves the last block hash of a processed section from the // SectionHead retrieves the last block hash of a processed section from the
// index database. // index database.
func (c *ChainIndexer) sectionHead(section uint64) common.Hash { func (c *ChainIndexer) SectionHead(section uint64) common.Hash {
var data [8]byte var data [8]byte
binary.BigEndian.PutUint64(data[:], section) binary.BigEndian.PutUint64(data[:], section)

View File

@ -235,7 +235,7 @@ func newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {
db, _ := ethdb.NewMemDatabase() db, _ := ethdb.NewMemDatabase()
genesis := gspec.MustCommit(db) genesis := gspec.MustCommit(db)
blockchain, _ := NewBlockChain(db, params.AllProtocolChanges, ethash.NewFaker(), vm.Config{}) blockchain, _ := NewBlockChain(db, params.AllEthashProtocolChanges, ethash.NewFaker(), vm.Config{})
// Create and inject the requested chain // Create and inject the requested chain
if n == 0 { if n == 0 {
return db, blockchain, nil return db, blockchain, nil

View File

@ -74,9 +74,9 @@ var (
preimageHitCounter = metrics.NewCounter("db/preimage/hits") preimageHitCounter = metrics.NewCounter("db/preimage/hits")
) )
// txLookupEntry is a positional metadata to help looking up the data content of // TxLookupEntry is a positional metadata to help looking up the data content of
// a transaction or receipt given only its hash. // a transaction or receipt given only its hash.
type txLookupEntry struct { type TxLookupEntry struct {
BlockHash common.Hash BlockHash common.Hash
BlockIndex uint64 BlockIndex uint64
Index uint64 Index uint64
@ -260,7 +260,7 @@ func GetTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64,
return common.Hash{}, 0, 0 return common.Hash{}, 0, 0
} }
// Parse and return the contents of the lookup entry // Parse and return the contents of the lookup entry
var entry txLookupEntry var entry TxLookupEntry
if err := rlp.DecodeBytes(data, &entry); err != nil { if err := rlp.DecodeBytes(data, &entry); err != nil {
log.Error("Invalid lookup entry RLP", "hash", hash, "err", err) log.Error("Invalid lookup entry RLP", "hash", hash, "err", err)
return common.Hash{}, 0, 0 return common.Hash{}, 0, 0
@ -296,7 +296,7 @@ func GetTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, co
if len(data) == 0 { if len(data) == 0 {
return nil, common.Hash{}, 0, 0 return nil, common.Hash{}, 0, 0
} }
var entry txLookupEntry var entry TxLookupEntry
if err := rlp.DecodeBytes(data, &entry); err != nil { if err := rlp.DecodeBytes(data, &entry); err != nil {
return nil, common.Hash{}, 0, 0 return nil, common.Hash{}, 0, 0
} }
@ -332,14 +332,13 @@ func GetReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Has
// GetBloomBits retrieves the compressed bloom bit vector belonging to the given // GetBloomBits retrieves the compressed bloom bit vector belonging to the given
// section and bit index from the. // section and bit index from the.
func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) []byte { func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) ([]byte, error) {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...) key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
binary.BigEndian.PutUint16(key[1:], uint16(bit)) binary.BigEndian.PutUint16(key[1:], uint16(bit))
binary.BigEndian.PutUint64(key[3:], section) binary.BigEndian.PutUint64(key[3:], section)
bits, _ := db.Get(key) return db.Get(key)
return bits
} }
// WriteCanonicalHash stores the canonical hash for the given block number. // WriteCanonicalHash stores the canonical hash for the given block number.
@ -465,7 +464,7 @@ func WriteBlockReceipts(db ethdb.Putter, hash common.Hash, number uint64, receip
func WriteTxLookupEntries(db ethdb.Putter, block *types.Block) error { func WriteTxLookupEntries(db ethdb.Putter, block *types.Block) error {
// Iterate over each transaction and encode its metadata // Iterate over each transaction and encode its metadata
for i, tx := range block.Transactions() { for i, tx := range block.Transactions() {
entry := txLookupEntry{ entry := TxLookupEntry{
BlockHash: block.Hash(), BlockHash: block.Hash(),
BlockIndex: block.NumberU64(), BlockIndex: block.NumberU64(),
Index: uint64(i), Index: uint64(i),

View File

@ -151,7 +151,7 @@ func (e *GenesisMismatchError) Error() string {
// The returned chain configuration is never nil. // The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) { func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil { if genesis != nil && genesis.Config == nil {
return params.AllProtocolChanges, common.Hash{}, errGenesisNoConfig return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
} }
// Just commit the new block if there is no stored genesis block. // Just commit the new block if there is no stored genesis block.
@ -216,7 +216,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
case ghash == params.TestnetGenesisHash: case ghash == params.TestnetGenesisHash:
return params.TestnetChainConfig return params.TestnetChainConfig
default: default:
return params.AllProtocolChanges return params.AllEthashProtocolChanges
} }
} }
@ -285,7 +285,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
} }
config := g.Config config := g.Config
if config == nil { if config == nil {
config = params.AllProtocolChanges config = params.AllEthashProtocolChanges
} }
return block, WriteChainConfig(db, block.Hash(), config) return block, WriteChainConfig(db, block.Hash(), config)
} }
@ -342,14 +342,30 @@ func DefaultRinkebyGenesisBlock() *Genesis {
} }
} }
// DevGenesisBlock returns the 'geth --dev' genesis block. // DeveloperGenesisBlock returns the 'geth --dev' genesis block. Note, this must
func DevGenesisBlock() *Genesis { // be seeded with the
func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one
config := *params.AllCliqueProtocolChanges
config.Clique.Period = period
// Assemble and return the genesis with the precompiles and faucet pre-funded
return &Genesis{ return &Genesis{
Config: params.AllProtocolChanges, Config: &config,
Nonce: 42, ExtraData: append(append(make([]byte, 32), faucet[:]...), make([]byte, 65)...),
GasLimit: 4712388, GasLimit: 6283185,
Difficulty: big.NewInt(131072), Difficulty: big.NewInt(1),
Alloc: decodePrealloc(devAllocData), Alloc: map[common.Address]GenesisAccount{
common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256
common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD
common.BytesToAddress([]byte{4}): {Balance: big.NewInt(1)}, // Identity
common.BytesToAddress([]byte{5}): {Balance: big.NewInt(1)}, // ModExp
common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd
common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
},
} }
} }

File diff suppressed because one or more lines are too long

View File

@ -23,8 +23,6 @@ import (
"math" "math"
"math/big" "math/big"
mrand "math/rand" mrand "math/rand"
"os"
"strconv"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -42,21 +40,6 @@ const (
numberCacheLimit = 2048 numberCacheLimit = 2048
) )
var writeDelay time.Duration = 0
func init() {
str, ok := os.LookupEnv("FEATURE_SYNC_DELAY")
if !ok {
return
}
delay, err := strconv.ParseInt(str, 10, 0)
if err != nil {
panic(fmt.Errorf("FEATURE_SYNC_DELAY value is invalid:", err))
}
writeDelay = time.Duration(delay) * time.Microsecond
}
// HeaderChain implements the basic block header chain logic that is shared by // HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as // core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure. // a part of either structure.
@ -120,10 +103,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
} }
hc.currentHeaderHash = hc.currentHeader.Hash() hc.currentHeaderHash = hc.currentHeader.Hash()
if writeDelay > 0 {
log.Info("Header writes are slowed down", "delay", writeDelay)
}
return hc, nil return hc, nil
} }
@ -292,11 +271,6 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
stats.ignored++ stats.ignored++
continue continue
} }
// introduce delay for the expensive write to
// trade off sync time with CPU usage
time.Sleep(writeDelay)
if err := writeHeader(header); err != nil { if err := writeHeader(header); err != nil {
return i, err return i, err
} }

View File

@ -31,6 +31,15 @@ import (
// into the journal, but no such file is currently open. // into the journal, but no such file is currently open.
var errNoActiveJournal = errors.New("no active journal") var errNoActiveJournal = errors.New("no active journal")
// devNull is a WriteCloser that just discards anything written into it. Its
// goal is to allow the transaction journal to write into a fake journal when
// loading transactions on startup without printing warnings due to no file
// being readt for write.
type devNull struct{}
func (*devNull) Write(p []byte) (n int, err error) { return len(p), nil }
func (*devNull) Close() error { return nil }
// txJournal is a rotating log of transactions with the aim of storing locally // txJournal is a rotating log of transactions with the aim of storing locally
// created transactions to allow non-executed ones to survive node restarts. // created transactions to allow non-executed ones to survive node restarts.
type txJournal struct { type txJournal struct {
@ -59,6 +68,10 @@ func (journal *txJournal) load(add func(*types.Transaction) error) error {
} }
defer input.Close() defer input.Close()
// Temporarily discard any journal additions (don't double add on load)
journal.writer = new(devNull)
defer func() { journal.writer = nil }()
// Inject all transactions from the journal into the pool // Inject all transactions from the journal into the pool
stream := rlp.NewStream(input, 0) stream := rlp.NewStream(input, 0)
total, dropped := 0, 0 total, dropped := 0, 0

View File

@ -254,7 +254,10 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran
old := l.txs.Get(tx.Nonce()) old := l.txs.Get(tx.Nonce())
if old != nil { if old != nil {
threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+int64(priceBump))), big.NewInt(100)) threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+int64(priceBump))), big.NewInt(100))
if threshold.Cmp(tx.GasPrice()) >= 0 { // Have to ensure that the new gas price is higher than the old gas
// price as well as checking the percentage threshold to ensure that
// this is accurate for low (Wei-level) gas price replacements
if old.GasPrice().Cmp(tx.GasPrice()) >= 0 || threshold.Cmp(tx.GasPrice()) > 0 {
return false, nil return false, nil
} }
} }

View File

@ -103,6 +103,16 @@ var (
underpricedTxCounter = metrics.NewCounter("txpool/underpriced") underpricedTxCounter = metrics.NewCounter("txpool/underpriced")
) )
// TxStatus is the current status of a transaction as seen py the pool.
type TxStatus uint
const (
TxStatusUnknown TxStatus = iota
TxStatusQueued
TxStatusPending
TxStatusIncluded
)
// blockChain provides the state of blockchain and current gas limit to do // blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers. // some pre checks in tx pool and event subscribers.
type blockChain interface { type blockChain interface {
@ -640,6 +650,10 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
pool.journalTx(from, tx) pool.journalTx(from, tx)
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
// We've directly injected a replacement transaction, notify subsystems
go pool.txFeed.Send(TxPreEvent{tx})
return old != nil, nil return old != nil, nil
} }
// New transaction isn't replacing a pending one, push into queue // New transaction isn't replacing a pending one, push into queue
@ -729,6 +743,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
// Set the potentially new pending nonce and notify any subsystems of the new tx // Set the potentially new pending nonce and notify any subsystems of the new tx
pool.beats[addr] = time.Now() pool.beats[addr] = time.Now()
pool.pendingState.SetNonce(addr, tx.Nonce()+1) pool.pendingState.SetNonce(addr, tx.Nonce()+1)
go pool.txFeed.Send(TxPreEvent{tx}) go pool.txFeed.Send(TxPreEvent{tx})
} }
@ -749,14 +764,14 @@ func (pool *TxPool) AddRemote(tx *types.Transaction) error {
// AddLocals enqueues a batch of transactions into the pool if they are valid, // AddLocals enqueues a batch of transactions into the pool if they are valid,
// marking the senders as a local ones in the mean time, ensuring they go around // marking the senders as a local ones in the mean time, ensuring they go around
// the local pricing constraints. // the local pricing constraints.
func (pool *TxPool) AddLocals(txs []*types.Transaction) error { func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
return pool.addTxs(txs, !pool.config.NoLocals) return pool.addTxs(txs, !pool.config.NoLocals)
} }
// AddRemotes enqueues a batch of transactions into the pool if they are valid. // AddRemotes enqueues a batch of transactions into the pool if they are valid.
// If the senders are not among the locally tracked ones, full pricing constraints // If the senders are not among the locally tracked ones, full pricing constraints
// will apply. // will apply.
func (pool *TxPool) AddRemotes(txs []*types.Transaction) error { func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
return pool.addTxs(txs, false) return pool.addTxs(txs, false)
} }
@ -779,7 +794,7 @@ func (pool *TxPool) addTx(tx *types.Transaction, local bool) error {
} }
// addTxs attempts to queue a batch of transactions if they are valid. // addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) error { func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error {
pool.mu.Lock() pool.mu.Lock()
defer pool.mu.Unlock() defer pool.mu.Unlock()
@ -788,11 +803,14 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) error {
// addTxsLocked attempts to queue a batch of transactions if they are valid, // addTxsLocked attempts to queue a batch of transactions if they are valid,
// whilst assuming the transaction pool lock is already held. // whilst assuming the transaction pool lock is already held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) error { func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
// Add the batch of transaction, tracking the accepted ones // Add the batch of transaction, tracking the accepted ones
dirty := make(map[common.Address]struct{}) dirty := make(map[common.Address]struct{})
for _, tx := range txs { errs := make([]error, len(txs))
if replace, err := pool.add(tx, local); err == nil {
for i, tx := range txs {
var replace bool
if replace, errs[i] = pool.add(tx, local); errs[i] == nil {
if !replace { if !replace {
from, _ := types.Sender(pool.signer, tx) // already validated from, _ := types.Sender(pool.signer, tx) // already validated
dirty[from] = struct{}{} dirty[from] = struct{}{}
@ -802,12 +820,32 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) error {
// Only reprocess the internal state if something was actually added // Only reprocess the internal state if something was actually added
if len(dirty) > 0 { if len(dirty) > 0 {
addrs := make([]common.Address, 0, len(dirty)) addrs := make([]common.Address, 0, len(dirty))
for addr, _ := range dirty { for addr := range dirty {
addrs = append(addrs, addr) addrs = append(addrs, addr)
} }
pool.promoteExecutables(addrs) pool.promoteExecutables(addrs)
} }
return nil return errs
}
// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
pool.mu.RLock()
defer pool.mu.RUnlock()
status := make([]TxStatus, len(hashes))
for i, hash := range hashes {
if tx := pool.all[hash]; tx != nil {
from, _ := types.Sender(pool.signer, tx) // already validated
if pool.pending[from].txs.items[tx.Nonce()] != nil {
status[i] = TxStatusPending
} else {
status[i] = TxStatusQueued
}
}
}
return status
} }
// Get returns a transaction if it is contained in the pool // Get returns a transaction if it is contained in the pool
@ -869,7 +907,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
// Gather all the accounts potentially needing updates // Gather all the accounts potentially needing updates
if accounts == nil { if accounts == nil {
accounts = make([]common.Address, 0, len(pool.queue)) accounts = make([]common.Address, 0, len(pool.queue))
for addr, _ := range pool.queue { for addr := range pool.queue {
accounts = append(accounts, addr) accounts = append(accounts, addr)
} }
} }

View File

@ -25,7 +25,7 @@ import (
// destinations stores one map per contract (keyed by hash of code). // destinations stores one map per contract (keyed by hash of code).
// The maps contain an entry for each location of a JUMPDEST // The maps contain an entry for each location of a JUMPDEST
// instruction. // instruction.
type destinations map[common.Hash][]byte type destinations map[common.Hash]bitvec
// has checks whether code has a JUMPDEST at dest. // has checks whether code has a JUMPDEST at dest.
func (d destinations) has(codehash common.Hash, code []byte, dest *big.Int) bool { func (d destinations) has(codehash common.Hash, code []byte, dest *big.Int) bool {
@ -38,24 +38,53 @@ func (d destinations) has(codehash common.Hash, code []byte, dest *big.Int) bool
m, analysed := d[codehash] m, analysed := d[codehash]
if !analysed { if !analysed {
m = jumpdests(code) m = codeBitmap(code)
d[codehash] = m d[codehash] = m
} }
return (m[udest/8] & (1 << (udest % 8))) != 0 return OpCode(code[udest]) == JUMPDEST && m.codeSegment(udest)
} }
// jumpdests creates a map that contains an entry for each // bitvec is a bit vector which maps bytes in a program.
// PC location that is a JUMPDEST instruction. // An unset bit means the byte is an opcode, a set bit means
func jumpdests(code []byte) []byte { // it's data (i.e. argument of PUSHxx).
m := make([]byte, len(code)/8+1) type bitvec []byte
for pc := uint64(0); pc < uint64(len(code)); pc++ {
func (bits *bitvec) set(pos uint64) {
(*bits)[pos/8] |= 0x80 >> (pos % 8)
}
func (bits *bitvec) set8(pos uint64) {
(*bits)[pos/8] |= 0xFF >> (pos % 8)
(*bits)[pos/8+1] |= ^(0xFF >> (pos % 8))
}
// codeSegment checks if the position is in a code segment.
func (bits *bitvec) codeSegment(pos uint64) bool {
return ((*bits)[pos/8] & (0x80 >> (pos % 8))) == 0
}
// codeBitmap collects data locations in code.
func codeBitmap(code []byte) bitvec {
// The bitmap is 4 bytes longer than necessary, in case the code
// ends with a PUSH32, the algorithm will push zeroes onto the
// bitvector outside the bounds of the actual code.
bits := make(bitvec, len(code)/8+1+4)
for pc := uint64(0); pc < uint64(len(code)); {
op := OpCode(code[pc]) op := OpCode(code[pc])
if op == JUMPDEST {
m[pc/8] |= 1 << (pc % 8) if op >= PUSH1 && op <= PUSH32 {
} else if op >= PUSH1 && op <= PUSH32 { numbits := op - PUSH1 + 1
a := uint64(op) - uint64(PUSH1) + 1 pc++
pc += a for ; numbits >= 8; numbits -= 8 {
bits.set8(pc) // 8
pc += 8
}
for ; numbits > 0; numbits-- {
bits.set(pc)
pc++
}
} else {
pc++
} }
} }
return m return bits
} }

View File

@ -161,8 +161,8 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
if in.cfg.Debug { if in.cfg.Debug {
logged = false logged = false
pcCopy = uint64(pc) pcCopy = pc
gasCopy = uint64(contract.Gas) gasCopy = contract.Gas
stackCopy = newstack() stackCopy = newstack()
for _, val := range stack.data { for _, val := range stack.data {
stackCopy.push(val) stackCopy.push(val)

View File

@ -42,12 +42,12 @@ type operation struct {
// memorySize returns the memory size required for the operation // memorySize returns the memory size required for the operation
memorySize memorySizeFunc memorySize memorySizeFunc
halts bool // indicates whether the operation shoult halt further execution halts bool // indicates whether the operation should halt further execution
jumps bool // indicates whether the program counter should not increment jumps bool // indicates whether the program counter should not increment
writes bool // determines whether this a state modifying operation writes bool // determines whether this a state modifying operation
valid bool // indication whether the retrieved operation is valid and known valid bool // indication whether the retrieved operation is valid and known
reverts bool // determines whether the operation reverts state (implicitly halts) reverts bool // determines whether the operation reverts state (implicitly halts)
returns bool // determines whether the opertions sets the return data content returns bool // determines whether the operations sets the return data content
} }
var ( var (

View File

@ -45,7 +45,6 @@ type LogConfig struct {
DisableMemory bool // disable memory capture DisableMemory bool // disable memory capture
DisableStack bool // disable stack capture DisableStack bool // disable stack capture
DisableStorage bool // disable storage capture DisableStorage bool // disable storage capture
FullStorage bool // show full storage (slow)
Limit int // maximum length of output, but zero means unlimited Limit int // maximum length of output, but zero means unlimited
} }
@ -136,14 +135,13 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
) )
l.changedValues[contract.Address()][address] = value l.changedValues[contract.Address()][address] = value
} }
// copy a snapstot of the current memory state to a new buffer // Copy a snapstot of the current memory state to a new buffer
var mem []byte var mem []byte
if !l.cfg.DisableMemory { if !l.cfg.DisableMemory {
mem = make([]byte, len(memory.Data())) mem = make([]byte, len(memory.Data()))
copy(mem, memory.Data()) copy(mem, memory.Data())
} }
// Copy a snapshot of the current stack state to a new buffer
// copy a snapshot of the current stack state to a new buffer
var stck []*big.Int var stck []*big.Int
if !l.cfg.DisableStack { if !l.cfg.DisableStack {
stck = make([]*big.Int, len(stack.Data())) stck = make([]*big.Int, len(stack.Data()))
@ -151,26 +149,10 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
stck[i] = new(big.Int).Set(item) stck[i] = new(big.Int).Set(item)
} }
} }
// Copy a snapshot of the current storage to a new container
// Copy the storage based on the settings specified in the log config. If full storage
// is disabled (default) we can use the simple Storage.Copy method, otherwise we use
// the state object to query for all values (slow process).
var storage Storage var storage Storage
if !l.cfg.DisableStorage { if !l.cfg.DisableStorage {
if l.cfg.FullStorage { storage = l.changedValues[contract.Address()].Copy()
storage = make(Storage)
// Get the contract account and loop over each storage entry. This may involve looping over
// the trie and is a very expensive process.
env.StateDB.ForEachStorage(contract.Address(), func(key, value common.Hash) bool {
storage[key] = value
// Return true, indicating we'd like to continue.
return true
})
} else {
// copy a snapshot of the current storage to a new container.
storage = l.changedValues[contract.Address()].Copy()
}
} }
// create a new snaptshot of the EVM. // create a new snaptshot of the EVM.
log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, storage, depth, err} log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, storage, depth, err}

View File

@ -0,0 +1,46 @@
## Go Ethereum Dashboard
The dashboard is a data visualizer integrated into geth, intended to collect and visualize useful information of an Ethereum node. It consists of two parts:
* The client visualizes the collected data.
* The server collects the data, and updates the clients.
The client's UI uses [React][React] with JSX syntax, which is validated by the [ESLint][ESLint] linter mostly according to the [Airbnb React/JSX Style Guide][Airbnb]. The style is defined in the `.eslintrc` configuration file. The resources are bundled into a single `bundle.js` file using [Webpack][Webpack], which relies on the `webpack.config.js`. The bundled file is referenced from `dashboard.html` and takes part in the `assets.go` too. The necessary dependencies for the module bundler are gathered by [Node.js][Node.js].
### Development and bundling
As the dashboard depends on certain NPM packages (which are not included in the go-ethereum repo), these need to be installed first:
```
$ (cd dashboard/assets && npm install)
```
Normally the dashboard assets are bundled into Geth via `go-bindata` to avoid external dependencies. Rebuilding Geth after each UI modification however is not feasible from a developer perspective. Instead, we can run `webpack` in watch mode to automatically rebundle the UI, and ask `geth` to use external assets to not rely on compiled resources:
```
$ (cd dashboard/assets && ./node_modules/.bin/webpack --watch)
$ geth --dashboard --dashboard.assets=dashboard/assets/public --vmodule=dashboard=5
```
To bundle up the final UI into Geth, run `webpack` and `go generate`:
```
$ (cd dashboard/assets && ./node_modules/.bin/webpack)
$ go generate ./dashboard
```
### Have fun
[Webpack][Webpack] offers handy tools for visualizing the bundle's dependency tree and space usage.
* Generate the bundle's profile running `webpack --profile --json > stats.json`
* For the _dependency tree_ go to [Webpack Analyze][WA], and import `stats.json`
* For the _space usage_ go to [Webpack Visualizer][WV], and import `stats.json`
[React]: https://reactjs.org/
[ESLint]: https://eslint.org/
[Airbnb]: https://github.com/airbnb/javascript/tree/master/react
[Webpack]: https://webpack.github.io/
[WA]: http://webpack.github.io/analyse/
[WV]: http://chrisbateman.github.io/webpack-visualizer/
[Node.js]: https://nodejs.org/en/

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,52 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// React syntax style mostly according to https://github.com/airbnb/javascript/tree/master/react
{
"plugins": [
"react"
],
"parser": "babel-eslint",
"parserOptions": {
"ecmaFeatures": {
"jsx": true,
"modules": true
}
},
"rules": {
"react/prefer-es6-class": 2,
"react/prefer-stateless-function": 2,
"react/jsx-pascal-case": 2,
"react/jsx-closing-bracket-location": [1, {"selfClosing": "tag-aligned", "nonEmpty": "tag-aligned"}],
"react/jsx-closing-tag-location": 1,
"jsx-quotes": ["error", "prefer-double"],
"no-multi-spaces": "error",
"react/jsx-tag-spacing": 2,
"react/jsx-curly-spacing": [2, {"when": "never", "children": true}],
"react/jsx-boolean-value": 2,
"react/no-string-refs": 2,
"react/jsx-wrap-multilines": 2,
"react/self-closing-comp": 2,
"react/jsx-no-bind": 2,
"react/require-render-return": 2,
"react/no-is-mounted": 2,
"key-spacing": ["error", {"align": {
"beforeColon": false,
"afterColon": true,
"on": "value"
}}]
}
}

View File

@ -0,0 +1,52 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// isNullOrUndefined returns true if the given variable is null or undefined.
export const isNullOrUndefined = variable => variable === null || typeof variable === 'undefined';
export const LIMIT = {
memory: 200, // Maximum number of memory data samples.
traffic: 200, // Maximum number of traffic data samples.
log: 200, // Maximum number of logs.
};
// The sidebar menu and the main content are rendered based on these elements.
export const TAGS = (() => {
const T = {
home: { title: "Home", },
chain: { title: "Chain", },
transactions: { title: "Transactions", },
network: { title: "Network", },
system: { title: "System", },
logs: { title: "Logs", },
};
// Using the key is circumstantial in some cases, so it is better to insert it also as a value.
// This way the mistyping is prevented.
for(let key in T) {
T[key]['id'] = key;
}
return T;
})();
export const DATA_KEYS = (() => {
const DK = {};
["memory", "traffic", "logs"].map(key => {
DK[key] = key;
});
return DK;
})();
// Temporary - taken from Material-UI
export const DRAWER_WIDTH = 240;

View File

@ -0,0 +1,169 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
import React, {Component} from 'react';
import PropTypes from 'prop-types';
import {withStyles} from 'material-ui/styles';
import SideBar from './SideBar.jsx';
import Header from './Header.jsx';
import Main from "./Main.jsx";
import {isNullOrUndefined, LIMIT, TAGS, DATA_KEYS,} from "./Common.jsx";
// Styles for the Dashboard component.
const styles = theme => ({
appFrame: {
position: 'relative',
display: 'flex',
width: '100%',
height: '100%',
background: theme.palette.background.default,
},
});
// Dashboard is the main component, which renders the whole page, makes connection with the server and listens for messages.
// When there is an incoming message, updates the page's content correspondingly.
class Dashboard extends Component {
constructor(props) {
super(props);
this.state = {
active: TAGS.home.id, // active menu
sideBar: true, // true if the sidebar is opened
memory: [],
traffic: [],
logs: [],
shouldUpdate: {},
};
}
// componentDidMount initiates the establishment of the first websocket connection after the component is rendered.
componentDidMount() {
this.reconnect();
}
// reconnect establishes a websocket connection with the server, listens for incoming messages
// and tries to reconnect on connection loss.
reconnect = () => {
const server = new WebSocket(((window.location.protocol === "https:") ? "wss://" : "ws://") + window.location.host + "/api");
server.onmessage = event => {
const msg = JSON.parse(event.data);
if (isNullOrUndefined(msg)) {
return;
}
this.update(msg);
};
server.onclose = () => {
setTimeout(this.reconnect, 3000);
};
};
// update analyzes the incoming message, and updates the charts' content correspondingly.
update = msg => {
console.log(msg);
this.setState(prevState => {
let newState = [];
newState.shouldUpdate = {};
const insert = (key, values, limit) => {
newState[key] = [...prevState[key], ...values];
while (newState[key].length > limit) {
newState[key].shift();
}
newState.shouldUpdate[key] = true;
};
// (Re)initialize the state with the past data.
if (!isNullOrUndefined(msg.history)) {
const memory = DATA_KEYS.memory;
const traffic = DATA_KEYS.traffic;
newState[memory] = [];
newState[traffic] = [];
if (!isNullOrUndefined(msg.history.memorySamples)) {
newState[memory] = msg.history.memorySamples.map(elem => isNullOrUndefined(elem.value) ? 0 : elem.value);
while (newState[memory].length > LIMIT.memory) {
newState[memory].shift();
}
newState.shouldUpdate[memory] = true;
}
if (!isNullOrUndefined(msg.history.trafficSamples)) {
newState[traffic] = msg.history.trafficSamples.map(elem => isNullOrUndefined(elem.value) ? 0 : elem.value);
while (newState[traffic].length > LIMIT.traffic) {
newState[traffic].shift();
}
newState.shouldUpdate[traffic] = true;
}
}
// Insert the new data samples.
if (!isNullOrUndefined(msg.memory)) {
insert(DATA_KEYS.memory, [isNullOrUndefined(msg.memory.value) ? 0 : msg.memory.value], LIMIT.memory);
}
if (!isNullOrUndefined(msg.traffic)) {
insert(DATA_KEYS.traffic, [isNullOrUndefined(msg.traffic.value) ? 0 : msg.traffic.value], LIMIT.traffic);
}
if (!isNullOrUndefined(msg.log)) {
insert(DATA_KEYS.logs, [msg.log], LIMIT.log);
}
return newState;
});
};
// The change of the active label on the SideBar component will trigger a new render in the Main component.
changeContent = active => {
this.setState(prevState => prevState.active !== active ? {active: active} : {});
};
openSideBar = () => {
this.setState({sideBar: true});
};
closeSideBar = () => {
this.setState({sideBar: false});
};
render() {
// The classes property is injected by withStyles().
const {classes} = this.props;
return (
<div className={classes.appFrame}>
<Header
opened={this.state.sideBar}
open={this.openSideBar}
/>
<SideBar
opened={this.state.sideBar}
close={this.closeSideBar}
changeContent={this.changeContent}
/>
<Main
opened={this.state.sideBar}
active={this.state.active}
memory={this.state.memory}
traffic={this.state.traffic}
logs={this.state.logs}
shouldUpdate={this.state.shouldUpdate}
/>
</div>
);
}
}
Dashboard.propTypes = {
classes: PropTypes.object.isRequired,
};
export default withStyles(styles)(Dashboard);

View File

@ -0,0 +1,87 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
import React, {Component} from 'react';
import PropTypes from 'prop-types';
import classNames from 'classnames';
import {withStyles} from 'material-ui/styles';
import AppBar from 'material-ui/AppBar';
import Toolbar from 'material-ui/Toolbar';
import Typography from 'material-ui/Typography';
import IconButton from 'material-ui/IconButton';
import MenuIcon from 'material-ui-icons/Menu';
import {DRAWER_WIDTH} from './Common.jsx';
// Styles for the Header component.
const styles = theme => ({
appBar: {
position: 'absolute',
transition: theme.transitions.create(['margin', 'width'], {
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.leavingScreen,
}),
},
appBarShift: {
marginLeft: DRAWER_WIDTH,
width: `calc(100% - ${DRAWER_WIDTH}px)`,
transition: theme.transitions.create(['margin', 'width'], {
easing: theme.transitions.easing.easeOut,
duration: theme.transitions.duration.enteringScreen,
}),
},
menuButton: {
marginLeft: 12,
marginRight: 20,
},
hide: {
display: 'none',
},
});
// Header renders a header, which contains a sidebar opener icon when that is closed.
class Header extends Component {
render() {
// The classes property is injected by withStyles().
const {classes} = this.props;
return (
<AppBar className={classNames(classes.appBar, this.props.opened && classes.appBarShift)}>
<Toolbar disableGutters={!this.props.opened}>
<IconButton
color="contrast"
aria-label="open drawer"
onClick={this.props.open}
className={classNames(classes.menuButton, this.props.opened && classes.hide)}
>
<MenuIcon />
</IconButton>
<Typography type="title" color="inherit" noWrap>
Go Ethereum Dashboard
</Typography>
</Toolbar>
</AppBar>
);
}
}
Header.propTypes = {
classes: PropTypes.object.isRequired,
opened: PropTypes.bool.isRequired,
open: PropTypes.func.isRequired,
};
export default withStyles(styles)(Header);

View File

@ -0,0 +1,89 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
import React, {Component} from 'react';
import PropTypes from 'prop-types';
import Grid from 'material-ui/Grid';
import {LineChart, AreaChart, Area, YAxis, CartesianGrid, Line, ResponsiveContainer} from 'recharts';
import {withTheme} from 'material-ui/styles';
import {isNullOrUndefined, DATA_KEYS} from "./Common.jsx";
// ChartGrid renders a grid container for responsive charts.
// The children are Recharts components extended with the Material-UI's xs property.
class ChartGrid extends Component {
render() {
return (
<Grid container spacing={this.props.spacing}>
{
React.Children.map(this.props.children, child => (
<Grid item xs={child.props.xs}>
<ResponsiveContainer width="100%" height={child.props.height}>
{React.cloneElement(child, {data: child.props.values.map(value => ({value: value}))})}
</ResponsiveContainer>
</Grid>
))
}
</Grid>
);
}
}
ChartGrid.propTypes = {
spacing: PropTypes.number.isRequired,
};
// Home renders the home component.
class Home extends Component {
shouldComponentUpdate(nextProps) {
return !isNullOrUndefined(nextProps.shouldUpdate[DATA_KEYS.memory]) ||
!isNullOrUndefined(nextProps.shouldUpdate[DATA_KEYS.traffic]);
}
render() {
const {theme} = this.props;
const memoryColor = theme.palette.primary[300];
const trafficColor = theme.palette.secondary[300];
return (
<ChartGrid spacing={24}>
<AreaChart xs={6} height={300} values={this.props.memory}>
<YAxis />
<Area type="monotone" dataKey="value" stroke={memoryColor} fill={memoryColor} />
</AreaChart>
<LineChart xs={6} height={300} values={this.props.traffic}>
<Line type="monotone" dataKey="value" stroke={trafficColor} dot={false} />
</LineChart>
<LineChart xs={6} height={300} values={this.props.memory}>
<YAxis />
<CartesianGrid stroke="#eee" strokeDasharray="5 5" />
<Line type="monotone" dataKey="value" stroke={memoryColor} dot={false} />
</LineChart>
<AreaChart xs={6} height={300} values={this.props.traffic}>
<CartesianGrid stroke="#eee" strokeDasharray="5 5" vertical={false} />
<Area type="monotone" dataKey="value" stroke={trafficColor} fill={trafficColor} />
</AreaChart>
</ChartGrid>
);
}
}
Home.propTypes = {
theme: PropTypes.object.isRequired,
shouldUpdate: PropTypes.object.isRequired,
};
export default withTheme()(Home);

View File

@ -0,0 +1,109 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
import React, {Component} from 'react';
import PropTypes from 'prop-types';
import classNames from 'classnames';
import {withStyles} from 'material-ui/styles';
import {TAGS, DRAWER_WIDTH} from "./Common.jsx";
import Home from './Home.jsx';
// ContentSwitch chooses and renders the proper page content.
class ContentSwitch extends Component {
render() {
switch(this.props.active) {
case TAGS.home.id:
return <Home memory={this.props.memory} traffic={this.props.traffic} shouldUpdate={this.props.shouldUpdate} />;
case TAGS.chain.id:
return null;
case TAGS.transactions.id:
return null;
case TAGS.network.id:
// Only for testing.
return null;
case TAGS.system.id:
return null;
case TAGS.logs.id:
return <div>{this.props.logs.map((log, index) => <div key={index}>{log}</div>)}</div>;
}
return null;
}
}
ContentSwitch.propTypes = {
active: PropTypes.string.isRequired,
shouldUpdate: PropTypes.object.isRequired,
};
// styles contains the styles for the Main component.
const styles = theme => ({
content: {
width: '100%',
marginLeft: -DRAWER_WIDTH,
flexGrow: 1,
backgroundColor: theme.palette.background.default,
padding: theme.spacing.unit * 3,
transition: theme.transitions.create('margin', {
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.leavingScreen,
}),
marginTop: 56,
overflow: 'auto',
[theme.breakpoints.up('sm')]: {
content: {
height: 'calc(100% - 64px)',
marginTop: 64,
},
},
},
contentShift: {
marginLeft: 0,
transition: theme.transitions.create('margin', {
easing: theme.transitions.easing.easeOut,
duration: theme.transitions.duration.enteringScreen,
}),
},
});
// Main renders a component for the page content.
class Main extends Component {
render() {
// The classes property is injected by withStyles().
const {classes} = this.props;
return (
<main className={classNames(classes.content, this.props.opened && classes.contentShift)}>
<ContentSwitch
active={this.props.active}
memory={this.props.memory}
traffic={this.props.traffic}
logs={this.props.logs}
shouldUpdate={this.props.shouldUpdate}
/>
</main>
);
}
}
Main.propTypes = {
classes: PropTypes.object.isRequired,
opened: PropTypes.bool.isRequired,
active: PropTypes.string.isRequired,
shouldUpdate: PropTypes.object.isRequired,
};
export default withStyles(styles)(Main);

View File

@ -0,0 +1,106 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
import React, {Component} from 'react';
import PropTypes from 'prop-types';
import {withStyles} from 'material-ui/styles';
import Drawer from 'material-ui/Drawer';
import {IconButton} from "material-ui";
import List, {ListItem, ListItemText} from 'material-ui/List';
import ChevronLeftIcon from 'material-ui-icons/ChevronLeft';
import {TAGS, DRAWER_WIDTH} from './Common.jsx';
// Styles for the SideBar component.
const styles = theme => ({
drawerPaper: {
position: 'relative',
height: '100%',
width: DRAWER_WIDTH,
},
drawerHeader: {
display: 'flex',
alignItems: 'center',
justifyContent: 'flex-end',
padding: '0 8px',
...theme.mixins.toolbar,
transitionDuration: {
enter: theme.transitions.duration.enteringScreen,
exit: theme.transitions.duration.leavingScreen,
}
},
});
// SideBar renders a sidebar component.
class SideBar extends Component {
constructor(props) {
super(props);
// clickOn contains onClick event functions for the menu items.
// Instantiate only once, and reuse the existing functions to prevent the creation of
// new function instances every time the render method is triggered.
this.clickOn = {};
for(let key in TAGS) {
const id = TAGS[key].id;
this.clickOn[id] = event => {
event.preventDefault();
console.log(event.target.key);
this.props.changeContent(id);
};
}
}
render() {
// The classes property is injected by withStyles().
const {classes} = this.props;
return (
<Drawer
type="persistent"
classes={{paper: classes.drawerPaper,}}
open={this.props.opened}
>
<div>
<div className={classes.drawerHeader}>
<IconButton onClick={this.props.close}>
<ChevronLeftIcon />
</IconButton>
</div>
<List>
{
Object.values(TAGS).map(tag => {
return (
<ListItem button key={tag.id} onClick={this.clickOn[tag.id]}>
<ListItemText primary={tag.title} />
</ListItem>
);
})
}
</List>
</div>
</Drawer>
);
}
}
SideBar.propTypes = {
classes: PropTypes.object.isRequired,
opened: PropTypes.bool.isRequired,
close: PropTypes.func.isRequired,
changeContent: PropTypes.func.isRequired,
};
export default withStyles(styles)(SideBar);

View File

@ -0,0 +1,36 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
import React from 'react';
import {hydrate} from 'react-dom';
import {createMuiTheme, MuiThemeProvider} from 'material-ui/styles';
import Dashboard from './components/Dashboard.jsx';
// Theme for the dashboard.
const theme = createMuiTheme({
palette: {
type: 'dark',
},
});
// Renders the whole dashboard.
hydrate(
<MuiThemeProvider theme={theme}>
<Dashboard />
</MuiThemeProvider>,
document.getElementById('dashboard')
);

View File

@ -0,0 +1,22 @@
{
"dependencies": {
"babel-core": "^6.26.0",
"babel-eslint": "^8.0.1",
"babel-loader": "^7.1.2",
"babel-preset-env": "^1.6.1",
"babel-preset-react": "^6.24.1",
"babel-preset-stage-0": "^6.24.1",
"classnames": "^2.2.5",
"eslint": "^4.5.0",
"eslint-plugin-react": "^7.4.0",
"material-ui": "^1.0.0-beta.18",
"material-ui-icons": "^1.0.0-beta.17",
"path": "^0.12.7",
"prop-types": "^15.6.0",
"recharts": "^1.0.0-beta.0",
"react": "^16.0.0",
"react-dom": "^16.0.0",
"url": "^0.11.0",
"webpack": "^3.5.5"
}
}

View File

@ -0,0 +1,17 @@
<!DOCTYPE html>
<html lang="en" style="height: 100%">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Go Ethereum Dashboard</title>
<link rel="shortcut icon" type="image/ico" href="https://ethereum.org/favicon.ico"/>
<!-- TODO (kurkomisi): Return to the external libraries to speed up the bundling during development -->
</head>
<body style="height: 100%; margin: 0">
<div id="dashboard" style="height: 100%"></div>
<script src="bundle.js"></script>
</body>
</html>

View File

@ -0,0 +1,36 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
const path = require('path');
// Bundles ./index.jsx and everything it imports into public/bundle.js,
// transpiling JSX files through Babel on the way.
const config = {
  entry: './index.jsx',
  output: {
    path: path.resolve(__dirname, 'public'),
    filename: 'bundle.js',
  },
  module: {
    loaders: [
      {
        test: /\.jsx$/, // regexp for JSX files
        loader: 'babel-loader', // The babel configuration is in the package.json.
        query: {
          presets: ['env', 'react', 'stage-0']
        }
      },
    ],
  },
};
module.exports = config;

View File

@ -0,0 +1,45 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package dashboard
import "time"
// DefaultConfig contains default settings for the dashboard: serve on
// localhost:8080 and refresh the chart data every 3 seconds.
var DefaultConfig = Config{
	Host:    "localhost",
	Port:    8080,
	Refresh: 3 * time.Second,
}
// Config contains the configuration parameters of the dashboard.
type Config struct {
	// Host is the host interface on which to start the dashboard server. If this
	// field is empty, no dashboard will be started.
	Host string `toml:",omitempty"`

	// Port is the TCP port number on which to start the dashboard server. The
	// default zero value is valid and will pick a port number randomly (useful
	// for ephemeral nodes).
	Port int `toml:",omitempty"`

	// Refresh is the refresh rate of the data updates, the chartEntry will be collected this often.
	Refresh time.Duration `toml:",omitempty"`

	// Assets offers a possibility to manually set the dashboard website's location on the server side.
	// It is useful for debugging, avoids the repeated generation of the binary.
	Assets string `toml:",omitempty"`
}

View File

@ -0,0 +1,305 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package dashboard
//go:generate go-bindata -nometadata -o assets.go -prefix assets -pkg dashboard assets/public/...
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc"
"github.com/rcrowley/go-metrics"
"golang.org/x/net/websocket"
)
const (
	memorySampleLimit  = 200 // Maximum number of memory data samples
	trafficSampleLimit = 200 // Maximum number of traffic data samples
)

// nextId is incremented atomically to assign a unique id to every accepted
// websocket connection.
// NOTE(review): Go naming convention would be nextID; left unchanged because
// it is referenced elsewhere in this package.
var nextId uint32 // Next connection id

// Dashboard contains the dashboard internals.
type Dashboard struct {
	config *Config

	listener net.Listener
	conns    map[uint32]*client // Currently live websocket connections
	charts   charts             // The collected data samples to plot
	lock     sync.RWMutex       // Lock protecting the dashboard's internals

	quit chan chan error // Channel used for graceful exit
	wg   sync.WaitGroup  // Tracks the collector goroutines and per-connection senders
}

// message embraces the data samples of a client message.
type message struct {
	History *charts     `json:"history,omitempty"` // Past data samples
	Memory  *chartEntry `json:"memory,omitempty"`  // One memory sample
	Traffic *chartEntry `json:"traffic,omitempty"` // One traffic sample
	Log     string      `json:"log,omitempty"`     // One log
}

// client represents active websocket connection with a remote browser.
type client struct {
	conn   *websocket.Conn // Particular live websocket connection
	msg    chan message    // Message queue for the update messages
	logger log.Logger      // Logger for the particular live websocket connection
}

// charts contains the collected data samples.
type charts struct {
	Memory  []*chartEntry `json:"memorySamples,omitempty"`
	Traffic []*chartEntry `json:"trafficSamples,omitempty"`
}

// chartEntry represents one data sample: the value measured at a given time.
type chartEntry struct {
	Time  time.Time `json:"time,omitempty"`
	Value float64   `json:"value,omitempty"`
}
// New creates a new dashboard instance with the given configuration.
// The returned error is always nil; it is kept for interface symmetry.
func New(config *Config) (*Dashboard, error) {
	db := &Dashboard{
		config: config,
		conns:  make(map[uint32]*client),
		quit:   make(chan chan error),
	}
	return db, nil
}
// Protocols implements node.Service; the dashboard speaks no devp2p
// protocols, so it returns nil.
func (db *Dashboard) Protocols() []p2p.Protocol { return nil }

// APIs implements node.Service; the dashboard exposes no RPC APIs, so it
// returns nil.
func (db *Dashboard) APIs() []rpc.API { return nil }
// Start implements node.Service, starting the data collection thread and the listening server of the dashboard.
func (db *Dashboard) Start(server *p2p.Server) error {
	db.wg.Add(2)
	go db.collectData()
	go db.collectLogs() // In case of removing this line change 2 back to 1 in wg.Add.

	// NOTE(review): handlers are registered on http.DefaultServeMux, so at
	// most one dashboard can run per process.
	http.HandleFunc("/", db.webHandler)
	http.Handle("/api", websocket.Handler(db.apiHandler))

	listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", db.config.Host, db.config.Port))
	if err != nil {
		return err
	}
	db.listener = listener

	// http.Serve returns once Stop closes the listener; its error is ignored
	// here — presumably intentional, TODO confirm.
	go http.Serve(listener, nil)

	return nil
}
// Stop implements node.Service, stopping the data collection thread and the connection listener of the dashboard.
func (db *Dashboard) Stop() error {
	// Close the connection listener, which stops the HTTP server.
	var errs []error
	if err := db.listener.Close(); err != nil {
		errs = append(errs, err)
	}
	// Close the collectors: one quit request per collector goroutine
	// (collectData and collectLogs), each acknowledged through errc.
	errc := make(chan error, 1)
	for i := 0; i < 2; i++ {
		db.quit <- errc
		if err := <-errc; err != nil {
			errs = append(errs, err)
		}
	}
	// Close the live websocket connections.
	db.lock.Lock()
	for _, c := range db.conns {
		if err := c.conn.Close(); err != nil {
			c.logger.Warn("Failed to close connection", "err", err)
		}
	}
	db.lock.Unlock()

	// Wait until every goroutine terminates.
	db.wg.Wait()
	log.Info("Dashboard stopped")

	var err error
	if len(errs) > 0 {
		err = fmt.Errorf("%v", errs)
	}
	return err
}
// webHandler handles all non-api requests, simply flattening and returning the
// dashboard website. Assets are read from the filesystem when config.Assets is
// set (development mode) and from the go-bindata bundle otherwise.
func (db *Dashboard) webHandler(w http.ResponseWriter, r *http.Request) {
	log.Debug("Request", "URL", r.URL)

	// NOTE(review): r.URL.String() includes any query string; consider
	// switching to r.URL.Path.
	path := r.URL.String()
	if path == "/" {
		path = "/dashboard.html"
	}
	// If the path of the assets is manually set, serve from the filesystem.
	if db.config.Assets != "" {
		// SECURITY FIX: clean the rooted request path before joining, so a
		// request containing ".." segments cannot escape the configured
		// assets directory (path traversal).
		blob, err := ioutil.ReadFile(filepath.Join(db.config.Assets, filepath.Clean("/"+path)))
		if err != nil {
			log.Warn("Failed to read file", "path", path, "err", err)
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		w.Write(blob)
		return
	}
	// Serve the asset embedded at build time by go-bindata.
	blob, err := Asset(filepath.Join("public", path))
	if err != nil {
		log.Warn("Failed to load the asset", "path", path, "err", err)
		http.Error(w, "not found", http.StatusNotFound)
		return
	}
	w.Write(blob)
}
// apiHandler handles requests for the dashboard: it registers the websocket
// connection, starts a sender goroutine for it, pushes the historical samples
// and then blocks reading until the connection is lost.
func (db *Dashboard) apiHandler(conn *websocket.Conn) {
	id := atomic.AddUint32(&nextId, 1)
	client := &client{
		conn:   conn,
		msg:    make(chan message, 128),
		logger: log.New("id", id),
	}
	// done is closed by the read loop below on connection loss to stop the
	// sender goroutine (the original comment claiming it is buffered was wrong).
	done := make(chan struct{})

	// Start listening for messages to send.
	db.wg.Add(1)
	go func() {
		defer db.wg.Done()

		for {
			select {
			case <-done:
				return
			case msg := <-client.msg:
				if err := websocket.JSON.Send(client.conn, msg); err != nil {
					client.logger.Warn("Failed to send the message", "msg", msg, "err", err)
					client.conn.Close()
					return
				}
			}
		}
	}()
	// Send the past data.
	client.msg <- message{
		History: &db.charts,
	}
	// Start tracking the connection and drop at connection loss.
	db.lock.Lock()
	db.conns[id] = client
	db.lock.Unlock()
	defer func() {
		db.lock.Lock()
		delete(db.conns, id)
		db.lock.Unlock()
	}()
	// Block on the connection: a read error signals that the client went away.
	for {
		fail := []byte{}
		if _, err := conn.Read(fail); err != nil {
			close(done)
			return
		}
		// Ignore all messages
	}
}
// collectData collects the required data to plot on the dashboard: every
// config.Refresh it samples the memory and traffic meters, appends them to the
// bounded history buffers and broadcasts them to the connected clients.
func (db *Dashboard) collectData() {
	defer db.wg.Done()

	for {
		select {
		case errc := <-db.quit:
			// Acknowledge the shutdown request from Stop.
			errc <- nil
			return
		case <-time.After(db.config.Refresh):
			// NOTE(review): these type assertions panic if a metric is absent
			// or of another type — TODO confirm both meters are always
			// registered when the dashboard runs.
			inboundTraffic := metrics.DefaultRegistry.Get("p2p/InboundTraffic").(metrics.Meter).Rate1()
			memoryInUse := metrics.DefaultRegistry.Get("system/memory/inuse").(metrics.Meter).Rate1()
			now := time.Now()
			memory := &chartEntry{
				Time:  now,
				Value: memoryInUse,
			}
			traffic := &chartEntry{
				Time:  now,
				Value: inboundTraffic,
			}
			// Remove the first elements in case the samples' amount exceeds the limit.
			first := 0
			if len(db.charts.Memory) == memorySampleLimit {
				first = 1
			}
			db.charts.Memory = append(db.charts.Memory[first:], memory)
			first = 0
			if len(db.charts.Traffic) == trafficSampleLimit {
				first = 1
			}
			db.charts.Traffic = append(db.charts.Traffic[first:], traffic)

			db.sendToAll(&message{
				Memory:  memory,
				Traffic: traffic,
			})
		}
	}
}
// collectLogs collects and sends the logs to the active dashboards, waking
// twice per refresh interval until a quit request arrives.
func (db *Dashboard) collectLogs() {
	defer db.wg.Done()

	// TODO (kurkomisi): log collection comes here.
	for {
		select {
		case <-time.After(db.config.Refresh / 2):
			db.sendToAll(&message{Log: "This is a fake log."})
		case errc := <-db.quit:
			errc <- nil
			return
		}
	}
}
// sendToAll sends the given message to the active dashboards. A client whose
// send buffer is already full is considered too slow and its connection is
// closed; the per-connection reader then unregisters it.
func (db *Dashboard) sendToAll(msg *message) {
	db.lock.Lock()
	defer db.lock.Unlock()

	for _, client := range db.conns {
		select {
		case client.msg <- *msg:
		default:
			client.conn.Close()
		}
	}
}

View File

@ -636,3 +636,86 @@ func storageRangeAt(st state.Trie, start []byte, maxResult int) StorageRangeResu
} }
return result return result
} }
// GetModifiedAccountsByNumber returns all accounts that have changed between the
// two blocks specified. A change is defined as a difference in nonce, balance,
// code hash, or storage hash.
//
// With one parameter, returns the list of accounts modified in the specified block.
func (api *PrivateDebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) {
	var startBlock, endBlock *types.Block

	startBlock = api.eth.blockchain.GetBlockByNumber(startNum)
	if startBlock == nil {
		// Block numbers are decimal values; report them with %d (the %x used
		// previously was inconsistent with the end-block error below).
		return nil, fmt.Errorf("start block %d not found", startNum)
	}
	if endNum == nil {
		// Single-block mode: diff the given block against its parent.
		endBlock = startBlock
		startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
		if startBlock == nil {
			return nil, fmt.Errorf("block %d has no parent", endBlock.Number())
		}
	} else {
		endBlock = api.eth.blockchain.GetBlockByNumber(*endNum)
		if endBlock == nil {
			return nil, fmt.Errorf("end block %d not found", *endNum)
		}
	}
	return api.getModifiedAccounts(startBlock, endBlock)
}
// GetModifiedAccountsByHash returns all accounts that have changed between the
// two blocks specified. A change is defined as a difference in nonce, balance,
// code hash, or storage hash.
//
// With one parameter, returns the list of accounts modified in the specified block.
func (api *PrivateDebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) {
	startBlock := api.eth.blockchain.GetBlockByHash(startHash)
	if startBlock == nil {
		return nil, fmt.Errorf("start block %x not found", startHash)
	}

	var endBlock *types.Block
	if endHash != nil {
		// Explicit range: resolve the end block by its hash.
		if endBlock = api.eth.blockchain.GetBlockByHash(*endHash); endBlock == nil {
			return nil, fmt.Errorf("end block %x not found", *endHash)
		}
	} else {
		// Single-block mode: diff the given block against its parent.
		endBlock = startBlock
		if startBlock = api.eth.blockchain.GetBlockByHash(endBlock.ParentHash()); startBlock == nil {
			return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
		}
	}
	return api.getModifiedAccounts(startBlock, endBlock)
}
// getModifiedAccounts walks a difference iterator over the state tries of the
// two blocks and returns the addresses whose state entries differ. It requires
// startBlock to be strictly lower than endBlock.
func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) {
	if startBlock.Number().Uint64() >= endBlock.Number().Uint64() {
		return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64())
	}

	oldTrie, err := trie.NewSecure(startBlock.Root(), api.eth.chainDb, 0)
	if err != nil {
		return nil, err
	}
	newTrie, err := trie.NewSecure(endBlock.Root(), api.eth.chainDb, 0)
	if err != nil {
		return nil, err
	}
	// Only visit the trie nodes that differ between the two states.
	diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
	it := trie.NewIterator(diff)

	var modified []common.Address
	for it.Next() {
		// Map the hashed key back to the original account address.
		preimage := newTrie.GetKey(it.Key)
		if preimage == nil {
			return nil, fmt.Errorf("no preimage found for hash %x", it.Key)
		}
		modified = append(modified, common.BytesToAddress(preimage))
	}
	return modified, nil
}

View File

@ -54,6 +54,7 @@ type LesServer interface {
Start(srvr *p2p.Server) Start(srvr *p2p.Server)
Stop() Stop()
Protocols() []p2p.Protocol Protocols() []p2p.Protocol
SetBloomBitsIndexer(bbIndexer *core.ChainIndexer)
} }
// Ethereum implements the Ethereum full node service. // Ethereum implements the Ethereum full node service.
@ -95,6 +96,7 @@ type Ethereum struct {
func (s *Ethereum) AddLesServer(ls LesServer) { func (s *Ethereum) AddLesServer(ls LesServer) {
s.lesServer = ls s.lesServer = ls
ls.SetBloomBitsIndexer(s.bloomIndexer)
} }
// New creates a new Ethereum object (including the // New creates a new Ethereum object (including the
@ -154,7 +156,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
eth.blockchain.SetHead(compat.RewindTo) eth.blockchain.SetHead(compat.RewindTo)
core.WriteChainConfig(chainDb, genesisHash, chainConfig) core.WriteChainConfig(chainDb, genesisHash, chainConfig)
} }
eth.bloomIndexer.Start(eth.blockchain.CurrentHeader(), eth.blockchain.SubscribeChainEvent) eth.bloomIndexer.Start(eth.blockchain)
if config.TxPool.Journal != "" { if config.TxPool.Journal != "" {
config.TxPool.Journal = ctx.ResolvePath(config.TxPool.Journal) config.TxPool.Journal = ctx.ResolvePath(config.TxPool.Journal)

View File

@ -58,15 +58,18 @@ func (eth *Ethereum) startBloomHandlers() {
case request := <-eth.bloomRequests: case request := <-eth.bloomRequests:
task := <-request task := <-request
task.Bitsets = make([][]byte, len(task.Sections)) task.Bitsets = make([][]byte, len(task.Sections))
for i, section := range task.Sections { for i, section := range task.Sections {
head := core.GetCanonicalHash(eth.chainDb, (section+1)*params.BloomBitsBlocks-1) head := core.GetCanonicalHash(eth.chainDb, (section+1)*params.BloomBitsBlocks-1)
blob, err := bitutil.DecompressBytes(core.GetBloomBits(eth.chainDb, task.Bit, section, head), int(params.BloomBitsBlocks)/8) if compVector, err := core.GetBloomBits(eth.chainDb, task.Bit, section, head); err == nil {
if err != nil { if blob, err := bitutil.DecompressBytes(compVector, int(params.BloomBitsBlocks)/8); err == nil {
panic(err) task.Bitsets[i] = blob
} else {
task.Error = err
}
} else {
task.Error = err
} }
task.Bitsets[i] = blob
} }
request <- task request <- task
} }
@ -111,12 +114,10 @@ func NewBloomIndexer(db ethdb.Database, size uint64) *core.ChainIndexer {
// Reset implements core.ChainIndexerBackend, starting a new bloombits index // Reset implements core.ChainIndexerBackend, starting a new bloombits index
// section. // section.
func (b *BloomIndexer) Reset(section uint64) { func (b *BloomIndexer) Reset(section uint64, lastSectionHead common.Hash) error {
gen, err := bloombits.NewGenerator(uint(b.size)) gen, err := bloombits.NewGenerator(uint(b.size))
if err != nil {
panic(err)
}
b.gen, b.section, b.head = gen, section, common.Hash{} b.gen, b.section, b.head = gen, section, common.Hash{}
return err
} }
// Process implements core.ChainIndexerBackend, adding a new header's bloom into // Process implements core.ChainIndexerBackend, adding a new header's bloom into

View File

@ -333,7 +333,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
} }
// synchronise will select the peer and use it for synchronising. If an empty string is given // synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous // checks fail an error will be returned. This method is synchronous
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error { func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
// Mock out the synchronisation if testing // Mock out the synchronisation if testing
@ -708,7 +708,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
ttl := d.requestTTL() ttl := d.requestTTL()
timeout := time.After(ttl) timeout := time.After(ttl)
go p.peer.RequestHeadersByNumber(uint64(check), 1, 0, false) go p.peer.RequestHeadersByNumber(check, 1, 0, false)
// Wait until a reply arrives to this request // Wait until a reply arrives to this request
for arrived := false; !arrived; { for arrived := false; !arrived; {
@ -1003,8 +1003,8 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
return errCancel return errCancel
case packet := <-deliveryCh: case packet := <-deliveryCh:
// If the peer was previously banned and failed to deliver it's pack // If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore it's message. // in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(packet.PeerId()); peer != nil { if peer := d.peers.Peer(packet.PeerId()); peer != nil {
// Deliver the received chunk of data and check chain validity // Deliver the received chunk of data and check chain validity
accepted, err := deliver(packet) accepted, err := deliver(packet)
@ -1205,8 +1205,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
case <-d.cancelCh: case <-d.cancelCh:
} }
} }
// If no headers were retrieved at all, the peer violated it's TD promise that it had a // If no headers were retrieved at all, the peer violated its TD promise that it had a
// better chain compared to ours. The only exception is if it's promised blocks were // better chain compared to ours. The only exception is if its promised blocks were
// already imported by other means (e.g. fecher): // already imported by other means (e.g. fecher):
// //
// R <remote peer>, L <local node>: Both at block 10 // R <remote peer>, L <local node>: Both at block 10
@ -1518,7 +1518,7 @@ func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, i
func (d *Downloader) qosTuner() { func (d *Downloader) qosTuner() {
for { for {
// Retrieve the current median RTT and integrate into the previoust target RTT // Retrieve the current median RTT and integrate into the previoust target RTT
rtt := time.Duration(float64(1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT())) rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
atomic.StoreUint64(&d.rttEstimate, uint64(rtt)) atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
// A new RTT cycle passed, increase our confidence in the estimated RTT // A new RTT cycle passed, increase our confidence in the estimated RTT

View File

@ -62,7 +62,7 @@ func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int,
number := origin.Number.Uint64() number := origin.Number.Uint64()
headers = append(headers, origin) headers = append(headers, origin)
if reverse { if reverse {
for i := 0; i < int(skip)+1; i++ { for i := 0; i <= skip; i++ {
if header := p.hc.GetHeader(hash, number); header != nil { if header := p.hc.GetHeader(hash, number); header != nil {
hash = header.ParentHash hash = header.ParentHash
number-- number--

View File

@ -19,7 +19,6 @@ package filters
import ( import (
"context" "context"
"math/big" "math/big"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -136,11 +135,11 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err
// Create a matcher session and request servicing from the backend // Create a matcher session and request servicing from the backend
matches := make(chan uint64, 64) matches := make(chan uint64, 64)
session, err := f.matcher.Start(uint64(f.begin), end, matches) session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer session.Close(time.Second) defer session.Close()
f.backend.ServiceFilter(ctx, session) f.backend.ServiceFilter(ctx, session)
@ -152,9 +151,14 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err
case number, ok := <-matches: case number, ok := <-matches:
// Abort if all matches have been fulfilled // Abort if all matches have been fulfilled
if !ok { if !ok {
f.begin = int64(end) + 1 err := session.Error()
return logs, nil if err == nil {
f.begin = int64(end) + 1
}
return logs, err
} }
f.begin = int64(number) + 1
// Retrieve the suggested block and pull any truly matching logs // Retrieve the suggested block and pull any truly matching logs
header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
if header == nil || err != nil { if header == nil || err != nil {
@ -203,7 +207,7 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs [
} }
var unfiltered []*types.Log var unfiltered []*types.Log
for _, receipt := range receipts { for _, receipt := range receipts {
unfiltered = append(unfiltered, ([]*types.Log)(receipt.Logs)...) unfiltered = append(unfiltered, receipt.Logs...)
} }
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
if len(logs) > 0 { if len(logs) > 0 {

View File

@ -97,7 +97,7 @@ var errorToString = map[int]string{
type txPool interface { type txPool interface {
// AddRemotes should add the given transactions to the pool. // AddRemotes should add the given transactions to the pool.
AddRemotes([]*types.Transaction) error AddRemotes([]*types.Transaction) []error
// Pending should return pending transactions. // Pending should return pending transactions.
// The slice should be modifiable by the caller. // The slice should be modifiable by the caller.

View File

@ -379,7 +379,7 @@ func (s *Service) login(conn *websocket.Conn) error {
protocol = fmt.Sprintf("eth/%d", eth.ProtocolVersions[0]) protocol = fmt.Sprintf("eth/%d", eth.ProtocolVersions[0])
} else { } else {
network = fmt.Sprintf("%d", infos.Protocols["les"].(*eth.EthNodeInfo).Network) network = fmt.Sprintf("%d", infos.Protocols["les"].(*eth.EthNodeInfo).Network)
protocol = fmt.Sprintf("les/%d", les.ProtocolVersions[0]) protocol = fmt.Sprintf("les/%d", les.ClientProtocolVersions[0])
} }
auth := &authMsg{ auth := &authMsg{
Id: s.node, Id: s.node,

View File

@ -127,6 +127,8 @@ func (f *Feed) remove(sub *feedSub) {
// Send delivers to all subscribed channels simultaneously. // Send delivers to all subscribed channels simultaneously.
// It returns the number of subscribers that the value was sent to. // It returns the number of subscribers that the value was sent to.
func (f *Feed) Send(value interface{}) (nsent int) { func (f *Feed) Send(value interface{}) (nsent int) {
rvalue := reflect.ValueOf(value)
f.once.Do(f.init) f.once.Do(f.init)
<-f.sendLock <-f.sendLock
@ -134,14 +136,14 @@ func (f *Feed) Send(value interface{}) (nsent int) {
f.mu.Lock() f.mu.Lock()
f.sendCases = append(f.sendCases, f.inbox...) f.sendCases = append(f.sendCases, f.inbox...)
f.inbox = nil f.inbox = nil
f.mu.Unlock()
// Set the sent value on all channels.
rvalue := reflect.ValueOf(value)
if !f.typecheck(rvalue.Type()) { if !f.typecheck(rvalue.Type()) {
f.sendLock <- struct{}{} f.sendLock <- struct{}{}
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype}) panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
} }
f.mu.Unlock()
// Set the sent value on all channels.
for i := firstSubSendCase; i < len(f.sendCases); i++ { for i := firstSubSendCase; i < len(f.sendCases); i++ {
f.sendCases[i].Send = rvalue f.sendCases[i].Send = rvalue
} }

View File

@ -82,18 +82,22 @@ func Env() Environment {
// LocalEnv returns build environment metadata gathered from git. // LocalEnv returns build environment metadata gathered from git.
func LocalEnv() Environment { func LocalEnv() Environment {
env := applyEnvFlags(Environment{Name: "local", Repo: "ethereum/go-ethereum"}) env := applyEnvFlags(Environment{Name: "local", Repo: "ethereum/go-ethereum"})
if _, err := os.Stat(".git"); err != nil {
head := readGitFile("HEAD")
if splits := strings.Split(head, " "); len(splits) == 2 {
head = splits[1]
} else {
return env return env
} }
if env.Commit == "" { if env.Commit == "" {
env.Commit = RunGit("rev-parse", "HEAD") env.Commit = readGitFile(head)
} }
if env.Branch == "" { if env.Branch == "" {
if b := RunGit("rev-parse", "--abbrev-ref", "HEAD"); b != "HEAD" { if head != "HEAD" {
env.Branch = b env.Branch = strings.TrimLeft(head, "refs/heads/")
} }
} }
if env.Tag == "" { if info, err := os.Stat(".git/objects"); err == nil && info.IsDir() && env.Tag == "" {
env.Tag = firstLine(RunGit("tag", "-l", "--points-at", "HEAD")) env.Tag = firstLine(RunGit("tag", "-l", "--points-at", "HEAD"))
} }
return env return env

View File

@ -25,6 +25,7 @@ import (
"log" "log"
"os" "os"
"os/exec" "os/exec"
"path"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strings" "strings"
@ -88,6 +89,15 @@ func RunGit(args ...string) string {
return strings.TrimSpace(stdout.String()) return strings.TrimSpace(stdout.String())
} }
// readGitFile returns content of file in .git directory.
func readGitFile(file string) string {
content, err := ioutil.ReadFile(path.Join(".git", file))
if err != nil {
return ""
}
return strings.TrimSpace(string(content))
}
// Render renders the given template file into outputFile. // Render renders the given template file into outputFile.
func Render(templateFile, outputFile string, outputPerm os.FileMode, x interface{}) { func Render(templateFile, outputFile string, outputPerm os.FileMode, x interface{}) {
tpl := template.Must(template.ParseFiles(templateFile)) tpl := template.Must(template.ParseFiles(templateFile))

View File

@ -151,9 +151,9 @@ func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string {
// Define a formatter to flatten a transaction into a string // Define a formatter to flatten a transaction into a string
var format = func(tx *types.Transaction) string { var format = func(tx *types.Transaction) string {
if to := tx.To(); to != nil { if to := tx.To(); to != nil {
return fmt.Sprintf("%s: %v wei + %v × %v gas", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice()) return fmt.Sprintf("%s: %v wei + %v gas × %v wei", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice())
} }
return fmt.Sprintf("contract creation: %v wei + %v × %v gas", tx.Value(), tx.Gas(), tx.GasPrice()) return fmt.Sprintf("contract creation: %v wei + %v gas × %v wei", tx.Value(), tx.Gas(), tx.GasPrice())
} }
// Flatten the pending transactions // Flatten the pending transactions
for account, txs := range pending { for account, txs := range pending {
@ -663,12 +663,14 @@ func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr r
return (hexutil.Bytes)(result), err return (hexutil.Bytes)(result), err
} }
// EstimateGas returns an estimate of the amount of gas needed to execute the given transaction. // EstimateGas returns an estimate of the amount of gas needed to execute the
// given transaction against the current pending block.
func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (*hexutil.Big, error) { func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (*hexutil.Big, error) {
// Binary search the gas requirement, as it may be higher than the amount used // Determine the lowest and highest possible gas limits to binary search in between
var ( var (
lo uint64 = params.TxGas - 1 lo uint64 = params.TxGas - 1
hi uint64 hi uint64
cap uint64
) )
if (*big.Int)(&args.Gas).Uint64() >= params.TxGas { if (*big.Int)(&args.Gas).Uint64() >= params.TxGas {
hi = (*big.Int)(&args.Gas).Uint64() hi = (*big.Int)(&args.Gas).Uint64()
@ -680,20 +682,31 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (*
} }
hi = block.GasLimit().Uint64() hi = block.GasLimit().Uint64()
} }
for lo+1 < hi { cap = hi
// Take a guess at the gas, and check transaction validity
mid := (hi + lo) / 2
(*big.Int)(&args.Gas).SetUint64(mid)
// Create a helper to check if a gas allowance results in an executable transaction
executable := func(gas uint64) bool {
(*big.Int)(&args.Gas).SetUint64(gas)
_, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, vm.Config{}) _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, vm.Config{})
// If the transaction became invalid or execution failed, raise the gas limit
if err != nil || failed { if err != nil || failed {
lo = mid return false
continue }
return true
}
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
mid := (hi + lo) / 2
if !executable(mid) {
lo = mid
} else {
hi = mid
}
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
if !executable(hi) {
return nil, fmt.Errorf("gas required exceeds allowance or always failing transaction")
} }
// Otherwise assume the transaction succeeded, lower the gas limit
hi = mid
} }
return (*hexutil.Big)(new(big.Int).SetUint64(hi)), nil return (*hexutil.Big)(new(big.Int).SetUint64(hi)), nil
} }
@ -711,45 +724,52 @@ type ExecutionResult struct {
// StructLogRes stores a structured log emitted by the EVM while replaying a // StructLogRes stores a structured log emitted by the EVM while replaying a
// transaction in debug mode // transaction in debug mode
type StructLogRes struct { type StructLogRes struct {
Pc uint64 `json:"pc"` Pc uint64 `json:"pc"`
Op string `json:"op"` Op string `json:"op"`
Gas uint64 `json:"gas"` Gas uint64 `json:"gas"`
GasCost uint64 `json:"gasCost"` GasCost uint64 `json:"gasCost"`
Depth int `json:"depth"` Depth int `json:"depth"`
Error error `json:"error"` Error error `json:"error,omitempty"`
Stack []string `json:"stack"` Stack *[]string `json:"stack,omitempty"`
Memory []string `json:"memory"` Memory *[]string `json:"memory,omitempty"`
Storage map[string]string `json:"storage"` Storage *map[string]string `json:"storage,omitempty"`
} }
// formatLogs formats EVM returned structured logs for json output // formatLogs formats EVM returned structured logs for json output
func FormatLogs(structLogs []vm.StructLog) []StructLogRes { func FormatLogs(logs []vm.StructLog) []StructLogRes {
formattedStructLogs := make([]StructLogRes, len(structLogs)) formatted := make([]StructLogRes, len(logs))
for index, trace := range structLogs { for index, trace := range logs {
formattedStructLogs[index] = StructLogRes{ formatted[index] = StructLogRes{
Pc: trace.Pc, Pc: trace.Pc,
Op: trace.Op.String(), Op: trace.Op.String(),
Gas: trace.Gas, Gas: trace.Gas,
GasCost: trace.GasCost, GasCost: trace.GasCost,
Depth: trace.Depth, Depth: trace.Depth,
Error: trace.Err, Error: trace.Err,
Stack: make([]string, len(trace.Stack)),
Storage: make(map[string]string),
} }
if trace.Stack != nil {
for i, stackValue := range trace.Stack { stack := make([]string, len(trace.Stack))
formattedStructLogs[index].Stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) for i, stackValue := range trace.Stack {
stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32))
}
formatted[index].Stack = &stack
} }
if trace.Memory != nil {
for i := 0; i+32 <= len(trace.Memory); i += 32 { memory := make([]string, 0, (len(trace.Memory)+31)/32)
formattedStructLogs[index].Memory = append(formattedStructLogs[index].Memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) for i := 0; i+32 <= len(trace.Memory); i += 32 {
memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32]))
}
formatted[index].Memory = &memory
} }
if trace.Storage != nil {
for i, storageValue := range trace.Storage { storage := make(map[string]string)
formattedStructLogs[index].Storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue) for i, storageValue := range trace.Storage {
storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue)
}
formatted[index].Storage = &storage
} }
} }
return formattedStructLogs return formatted
} }
// rpcOutputBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // rpcOutputBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are

Some files were not shown because too many files have changed in this diff Show More