Rebase geth 1.7.0 (#353)

Adam Babik 2017-10-10 11:38:49 +02:00 committed by Ivan Tomilov
parent e4cb6b060a
commit 90acfedf7a
252 changed files with 24728 additions and 6284 deletions

View File

@ -1,8 +1,8 @@
package account
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
@ -148,20 +148,28 @@ func (m *Manager) RecoverAccount(password, mnemonic string) (address, pubKey str
// If no error is returned, the account is considered verified.
func (m *Manager) VerifyAccountPassword(keyStoreDir, address, password string) (*keystore.Key, error) {
var err error
var keyJSON []byte
var foundKeyFile []byte
addressObj := gethcommon.BytesToAddress(gethcommon.FromHex(address))
checkAccountKey := func(path string, fileInfo os.FileInfo) error {
if len(keyJSON) > 0 || fileInfo.IsDir() {
if len(foundKeyFile) > 0 || fileInfo.IsDir() {
return nil
}
keyJSON, err = ioutil.ReadFile(path)
rawKeyFile, err := ioutil.ReadFile(path)
if err != nil {
return fmt.Errorf("invalid account key file: %v", err)
}
if !bytes.Contains(keyJSON, []byte(fmt.Sprintf(`"address":"%s"`, addressObj.Hex()[2:]))) {
keyJSON = []byte{}
var accountKey struct {
Address string `json:"address"`
}
if err := json.Unmarshal(rawKeyFile, &accountKey); err != nil {
return fmt.Errorf("failed to read key file: %s", err)
}
if gethcommon.HexToAddress("0x"+accountKey.Address).Hex() == addressObj.Hex() {
foundKeyFile = rawKeyFile
}
return nil
@ -177,18 +185,18 @@ func (m *Manager) VerifyAccountPassword(keyStoreDir, address, password string) (
return nil, fmt.Errorf("cannot traverse key store folder: %v", err)
}
if len(keyJSON) == 0 {
return nil, fmt.Errorf("cannot locate account for address: %x", addressObj)
if len(foundKeyFile) == 0 {
return nil, fmt.Errorf("cannot locate account for address: %s", addressObj.Hex())
}
key, err := keystore.DecryptKey(keyJSON, password)
key, err := keystore.DecryptKey(foundKeyFile, password)
if err != nil {
return nil, err
}
// avoid swap attack
if key.Address != addressObj {
return nil, fmt.Errorf("account mismatch: have %x, want %x", key.Address, addressObj)
return nil, fmt.Errorf("account mismatch: have %s, want %s", key.Address.Hex(), addressObj.Hex())
}
return key, nil
@ -293,7 +301,7 @@ func (m *Manager) importExtendedKey(extKey *extkeys.ExtendedKey, password string
if err != nil {
return "", "", err
}
address = fmt.Sprintf("%x", account.Address)
address = account.Address.Hex()
// obtain public key to return
account, key, err := keyStore.AccountDecryptedKey(account, password)
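The rewritten lookup above no longer substring-matches the raw key file bytes against a formatted address string; it unmarshals just the address field and normalizes both sides through gethcommon.HexToAddress(...).Hex(), so key files written before EIP-55 (which store the address as plain lowercase hex) still match a checksummed input. A minimal standalone sketch of that normalization, assuming only go-ethereum's common package:

package main

import (
	"fmt"

	gethcommon "github.com/ethereum/go-ethereum/common"
)

func main() {
	// A pre-EIP-55 key file stores the address as plain lowercase hex.
	fromKeyFile := "adaf150b905cf5e6a778e553e15a139b6618bbb7"
	// Callers may pass the EIP-55 checksummed form of the same address.
	requested := "0xAdAf150b905Cf5E6A778E553E15A139B6618BbB7"

	// HexToAddress parses case-insensitively into a 20-byte value, and
	// Hex() re-encodes it with the EIP-55 checksum, so both forms agree.
	a := gethcommon.HexToAddress("0x" + fromKeyFile)
	b := gethcommon.HexToAddress(requested)
	fmt.Println(a.Hex() == b.Hex()) // true
}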

View File

@ -79,14 +79,14 @@ func (s *AccountsTestSuite) TestVerifyAccountPassword() {
emptyKeyStoreDir,
TestConfig.Account1.Address,
TestConfig.Account1.Password,
fmt.Errorf("cannot locate account for address: %x", account1Address),
fmt.Errorf("cannot locate account for address: %s", account1Address.Hex()),
},
{
"wrong address, correct password",
keyStoreDir,
"0x79791d3e8f2daa1f7fec29649d152c0ada3cc535",
TestConfig.Account1.Password,
fmt.Errorf("cannot locate account for address: %s", "79791d3e8f2daa1f7fec29649d152c0ada3cc535"),
fmt.Errorf("cannot locate account for address: %s", "0x79791d3E8F2dAa1F7FeC29649d152c0aDA3cc535"),
},
{
"correct address, wrong password",
@ -108,8 +108,26 @@ func (s *AccountsTestSuite) TestVerifyAccountPassword() {
}
accountAddress := gethcommon.BytesToAddress(gethcommon.FromHex(testCase.address))
if accountKey.Address != accountAddress {
s.T().Fatalf("account mismatch: have %x, want %x", accountKey.Address, accountAddress)
s.T().Fatalf("account mismatch: have %s, want %s", accountKey.Address.Hex(), accountAddress.Hex())
}
}
}
}
// TestVerifyAccountPasswordWithAccountBeforeEIP55 verifies that VerifyAccountPassword
// can handle accounts created before the introduction of EIP-55.
func (s *AccountsTestSuite) TestVerifyAccountPasswordWithAccountBeforeEIP55() {
keyStoreDir, err := ioutil.TempDir("", "status-accounts-test")
s.NoError(err)
defer os.RemoveAll(keyStoreDir)
// Import keys and make sure one was created before EIP55 introduction.
err = common.ImportTestAccount(keyStoreDir, "test-account1-before-eip55.pk")
s.NoError(err)
acctManager := account.NewManager(nil)
address := gethcommon.HexToAddress(TestConfig.Account1.Address)
_, err = acctManager.VerifyAccountPassword(keyStoreDir, address.Hex(), TestConfig.Account1.Password)
s.NoError(err)
}

View File

@ -3,6 +3,7 @@ package api_test
import (
"errors"
"fmt"
"strings"
"github.com/status-im/status-go/geth/account"
"github.com/status-im/status-go/geth/common"
@ -44,8 +45,8 @@ func (s *BackendTestSuite) TestAccountsList() {
accounts, err = s.backend.AccountManager().Accounts()
require.NoError(err)
require.Equal(1, len(accounts), "exactly single account is expected (main account)")
require.Equal(string(accounts[0].Hex()), "0x"+address,
fmt.Sprintf("main account is not retured as the first key: got %s, expected %s", accounts[0].Hex(), "0x"+address))
require.Equal(string(accounts[0].Hex()), address,
fmt.Sprintf("main account is not retured as the first key: got %s, expected %s", accounts[0].Hex(), address))
// create sub-account 1
subAccount1, subPubKey1, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
@ -55,8 +56,8 @@ func (s *BackendTestSuite) TestAccountsList() {
accounts, err = s.backend.AccountManager().Accounts()
require.NoError(err)
require.Equal(2, len(accounts), "exactly 2 accounts are expected (main + sub-account 1)")
require.Equal(string(accounts[0].Hex()), "0x"+address, "main account is not retured as the first key")
require.Equal(string(accounts[1].Hex()), "0x"+subAccount1, "subAcount1 not returned")
require.Equal(string(accounts[0].Hex()), address, "main account is not retured as the first key")
require.Equal(string(accounts[1].Hex()), subAccount1, "subAcount1 not returned")
// create sub-account 2, index automatically progresses
subAccount2, subPubKey2, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
@ -67,14 +68,14 @@ func (s *BackendTestSuite) TestAccountsList() {
accounts, err = s.backend.AccountManager().Accounts()
require.NoError(err)
require.Equal(3, len(accounts), "unexpected number of accounts")
require.Equal(string(accounts[0].Hex()), "0x"+address, "main account is not retured as the first key")
require.Equal(string(accounts[0].Hex()), address, "main account is not retured as the first key")
subAccount1MatchesKey1 := string(accounts[1].Hex()) != "0x"+subAccount1
subAccount1MatchesKey2 := string(accounts[2].Hex()) != "0x"+subAccount1
subAccount1MatchesKey1 := string(accounts[1].Hex()) != subAccount1
subAccount1MatchesKey2 := string(accounts[2].Hex()) != subAccount1
require.False(!subAccount1MatchesKey1 && !subAccount1MatchesKey2, "subAccount1 not returned")
subAccount2MatchesKey1 := string(accounts[1].Hex()) != "0x"+subAccount2
subAccount2MatchesKey2 := string(accounts[2].Hex()) != "0x"+subAccount2
subAccount2MatchesKey1 := string(accounts[1].Hex()) != subAccount2
subAccount2MatchesKey2 := string(accounts[2].Hex()) != subAccount2
require.False(!subAccount2MatchesKey1 && !subAccount2MatchesKey2, "subAccount2 not returned")
}
@ -295,7 +296,7 @@ func (s *BackendTestSuite) TestSelectedAccountOnRestart() {
selectedAccount, err = s.backend.AccountManager().SelectedAccount()
require.NoError(err)
require.NotNil(selectedAccount)
require.Equal(selectedAccount.Address.Hex(), "0x"+address2, "incorrect address selected")
require.Equal(selectedAccount.Address.Hex(), address2, "incorrect address selected")
// resume node
nodeStarted, err := s.backend.StartNode(&preservedNodeConfig)
@ -306,7 +307,7 @@ func (s *BackendTestSuite) TestSelectedAccountOnRestart() {
selectedAccount, err = s.backend.AccountManager().SelectedAccount()
require.NoError(err)
require.NotNil(selectedAccount)
require.Equal(selectedAccount.Address.Hex(), "0x"+address2, "incorrect address selected")
require.Equal(selectedAccount.Address.Hex(), address2, "incorrect address selected")
// make sure that Whisper gets identity re-injected
whisperService = s.WhisperService()
@ -345,14 +346,14 @@ func (s *BackendTestSuite) TestRPCEthAccounts() {
rpcClient := s.backend.NodeManager().RPCClient()
expectedResponse := `{"jsonrpc":"2.0","id":1,"result":["` + TestConfig.Account1.Address + `"]}`
expected := `{"jsonrpc":"2.0","id":1,"result":["` + strings.ToLower(TestConfig.Account1.Address) + `"]}`
resp := rpcClient.CallRaw(`{
"jsonrpc": "2.0",
"id": 1,
"method": "eth_accounts",
"params": []
}`)
require.Equal(expectedResponse, resp)
require.Equal(expected, resp)
}
func (s *BackendTestSuite) TestRPCEthAccountsWithUpstream() {
@ -367,14 +368,14 @@ func (s *BackendTestSuite) TestRPCEthAccountsWithUpstream() {
rpcClient := s.backend.NodeManager().RPCClient()
expectedResponse := `{"jsonrpc":"2.0","id":1,"result":["` + TestConfig.Account1.Address + `"]}`
expected := `{"jsonrpc":"2.0","id":1,"result":["` + strings.ToLower(TestConfig.Account1.Address) + `"]}`
resp := rpcClient.CallRaw(`{
"jsonrpc": "2.0",
"id": 1,
"method": "eth_accounts",
"params": []
}`)
require.Equal(expectedResponse, resp)
require.Equal(expected, resp)
}
// regression test: eth_getTransactionReceipt with invalid transaction hash should return null
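Both expectations are lowercased because TestConfig now stores the checksummed form of each address, while the eth_accounts result serializes addresses as lowercase hex, so a byte-for-byte comparison needs case normalization first. A tiny sketch of the equivalent check, using only the standard library:

package main

import (
	"fmt"
	"strings"
)

func main() {
	configured := "0xAdAf150b905Cf5E6A778E553E15A139B6618BbB7" // checksummed, as in TestConfig
	fromRPC := "0xadaf150b905cf5e6a778e553e15a139b6618bbb7"    // lowercase hex, as in the RPC response

	// Hex addresses differ only in checksum casing, so a case fold is
	// equivalent to the strings.ToLower normalization in the tests.
	fmt.Println(strings.EqualFold(configured, fromRPC)) // true
}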

View File

@ -70,7 +70,7 @@ func MakeNode(config *params.NodeConfig) (*node.Node, error) {
return nil, ErrNodeMakeFailure
}
// start Ethereum service if we are not expected to use an upstream server.
// Start Ethereum service if we are not expected to use an upstream server.
if !config.UpstreamConfig.Enabled {
if err := activateEthService(stack, config); err != nil {
return nil, fmt.Errorf("%v: %v", ErrEthServiceRegistrationFailure, err)
@ -164,7 +164,6 @@ func activateEthService(stack *node.Node, config *params.NodeConfig) error {
ethConf.SyncMode = downloader.LightSync
ethConf.NetworkId = config.NetworkID
ethConf.DatabaseCache = config.LightEthConfig.DatabaseCache
ethConf.MaxPeers = config.MaxPeers
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
lightEth, err := les.New(ctx, &ethConf)

View File

@ -44,7 +44,8 @@
"enode://88c2b24429a6f7683fbfd06874ae3f1e7c8b4a5ffb846e77c705ba02e2543789d66fc032b6606a8d8888eb6239a2abe5897ce83f78dcdcfcb027d6ea69aa6fe9@163.172.157.61:30303",
"enode://ce6854c2c77a8800fcc12600206c344b8053bb90ee3ba280e6c4f18f3141cdc5ee80bcc3bdb24cbc0e96dffd4b38d7b57546ed528c00af6cd604ab65c4d528f6@163.172.153.124:30303",
"enode://00ae60771d9815daba35766d463a82a7b360b3a80e35ab2e0daa25bdc6ca6213ff4c8348025e7e1a908a8f58411a364fe02a0fb3c2aa32008304f063d8aaf1a2@163.172.132.85:30303",
"enode://86ebc843aa51669e08e27400e435f957918e39dc540b021a2f3291ab776c88bbda3d97631639219b6e77e375ab7944222c47713bdeb3251b25779ce743a39d70@212.47.254.155:30303"
"enode://86ebc843aa51669e08e27400e435f957918e39dc540b021a2f3291ab776c88bbda3d97631639219b6e77e375ab7944222c47713bdeb3251b25779ce743a39d70@212.47.254.155:30303",
"enode://a1ef9ba5550d5fac27f7cbd4e8d20a643ad75596f307c91cd6e7f85b548b8a6bf215cca436d6ee436d6135f9fe51398f8dd4c0bd6c6a0c332ccb41880f33ec12@51.15.218.125:30303"
]
},
"LightEthConfig": {

View File

@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
@ -91,44 +90,19 @@ func (s *RPCTestSuite) TestNewClient() {
require.NotNil(err)
}
func (s *RPCTestSuite) TestRPCSendTransaction() {
func (s *RPCTestSuite) TestRPCClientHandler() {
require := s.Require()
expectedResponse := []byte(`{"jsonrpc":"2.0","id":10,"result":"3434=done"}`)
// httpRPCServer will serve as an upstream server accepting transactions.
httpRPCServer := httptest.NewServer(service{
Handler: func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
var txReq txRequest
err := json.NewDecoder(r.Body).Decode(&txReq)
require.NoError(err)
if txReq.Method == "eth_getTransactionCount" {
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"jsonrpc": "2.0", "result": "0x434"}`))
return
}
payload := ([]byte)(txReq.Payload)
var bu []interface{}
jserr := json.Unmarshal(payload, &bu)
require.NoError(jserr)
require.Len(bu, 1)
require.IsType(bu[0], (map[string]interface{})(nil))
w.WriteHeader(http.StatusOK)
w.Write(expectedResponse)
},
})
s.StartTestNode(params.RopstenNetworkID, WithUpstream(httpRPCServer.URL))
s.StartTestNode(params.RopstenNetworkID)
defer s.StopTestNode()
rpcClient := s.NodeManager.RPCClient()
require.NotNil(rpcClient)
rpcClient.RegisterHandler("eth_sendTransaction", func(ctx context.Context, args ...interface{}) (interface{}, error) {
return map[string]interface{}{"done": true}, nil
})
response := rpcClient.CallRaw(`{
"jsonrpc": "2.0",
"id":10,
@ -138,13 +112,13 @@ func (s *RPCTestSuite) TestRPCSendTransaction() {
"to": "` + TestConfig.Account2.Address + `",
"value": "0x200",
"nonce": "0x100",
"data": "Will-power",
"data": "` + hexutil.Encode([]byte("Will-power")) + `",
"gasPrice": "0x4a817c800",
"gasLimit": "0x5208",
"chainId": 3391
}]
}`)
require.Equal(response, string(expectedResponse))
require.Equal(`{"jsonrpc":"2.0","id":10,"result":{"done":true}}`, response)
}
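The reworked test leans on status-go's client-side handler mechanism: RegisterHandler intercepts eth_sendTransaction locally, so no httptest upstream is needed, and the handler's return value comes back as an ordinary JSON-RPC result object. The transaction's data field is also hex-encoded now, since the RPC layer expects 0x-prefixed hex for byte payloads. A minimal sketch of that encoding, assuming go-ethereum's hexutil package:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// The "data" field of eth_sendTransaction must be 0x-prefixed hex,
	// so raw byte strings are encoded before being embedded in the call.
	fmt.Println(hexutil.Encode([]byte("Will-power"))) // 0x57696c6c2d706f776572
}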
func (s *RPCTestSuite) TestCallRPC() {

View File

@ -221,6 +221,7 @@ func (m *Manager) completeRemoteTransaction(queuedTx *common.QueuedTx, password
selectedAcct.Address.String(),
password,
); err != nil {
log.Warn("failed to verify account", "account", selectedAcct.Address.String(), "error", err.Error())
return emptyHash, err
}

File diff suppressed because one or more lines are too long

View File

@ -20,7 +20,8 @@
"enode://88c2b24429a6f7683fbfd06874ae3f1e7c8b4a5ffb846e77c705ba02e2543789d66fc032b6606a8d8888eb6239a2abe5897ce83f78dcdcfcb027d6ea69aa6fe9@163.172.157.61:30303",
"enode://ce6854c2c77a8800fcc12600206c344b8053bb90ee3ba280e6c4f18f3141cdc5ee80bcc3bdb24cbc0e96dffd4b38d7b57546ed528c00af6cd604ab65c4d528f6@163.172.153.124:30303",
"enode://00ae60771d9815daba35766d463a82a7b360b3a80e35ab2e0daa25bdc6ca6213ff4c8348025e7e1a908a8f58411a364fe02a0fb3c2aa32008304f063d8aaf1a2@163.172.132.85:30303",
"enode://86ebc843aa51669e08e27400e435f957918e39dc540b021a2f3291ab776c88bbda3d97631639219b6e77e375ab7944222c47713bdeb3251b25779ce743a39d70@212.47.254.155:30303"
"enode://86ebc843aa51669e08e27400e435f957918e39dc540b021a2f3291ab776c88bbda3d97631639219b6e77e375ab7944222c47713bdeb3251b25779ce743a39d70@212.47.254.155:30303",
"enode://a1ef9ba5550d5fac27f7cbd4e8d20a643ad75596f307c91cd6e7f85b548b8a6bf215cca436d6ee436d6135f9fe51398f8dd4c0bd6c6a0c332ccb41880f33ec12@51.15.218.125:30303"
]
},
"dev": {
@ -41,7 +42,8 @@
"enode://88c2b24429a6f7683fbfd06874ae3f1e7c8b4a5ffb846e77c705ba02e2543789d66fc032b6606a8d8888eb6239a2abe5897ce83f78dcdcfcb027d6ea69aa6fe9@163.172.157.61:30303",
"enode://ce6854c2c77a8800fcc12600206c344b8053bb90ee3ba280e6c4f18f3141cdc5ee80bcc3bdb24cbc0e96dffd4b38d7b57546ed528c00af6cd604ab65c4d528f6@163.172.153.124:30303",
"enode://00ae60771d9815daba35766d463a82a7b360b3a80e35ab2e0daa25bdc6ca6213ff4c8348025e7e1a908a8f58411a364fe02a0fb3c2aa32008304f063d8aaf1a2@163.172.132.85:30303",
"enode://86ebc843aa51669e08e27400e435f957918e39dc540b021a2f3291ab776c88bbda3d97631639219b6e77e375ab7944222c47713bdeb3251b25779ce743a39d70@212.47.254.155:30303"
"enode://86ebc843aa51669e08e27400e435f957918e39dc540b021a2f3291ab776c88bbda3d97631639219b6e77e375ab7944222c47713bdeb3251b25779ce743a39d70@212.47.254.155:30303",
"enode://a1ef9ba5550d5fac27f7cbd4e8d20a643ad75596f307c91cd6e7f85b548b8a6bf215cca436d6ee436d6135f9fe51398f8dd4c0bd6c6a0c332ccb41880f33ec12@51.15.218.125:30303"
]
}
},

View File

@ -5,11 +5,11 @@
"WSPort": 8646
},
"Account1": {
"Address": "0xadaf150b905cf5e6a778e553e15a139b6618bbb7",
"Address": "0xAdAf150b905Cf5E6A778E553E15A139B6618BbB7",
"Password": "asdfasdf"
},
"Account2": {
"Address": "0x65c01586aa0ce152835c788ace665e91ab3527b8",
"Address": "0x65C01586aa0Ce152835c788aCe665e91Ab3527b8",
"Password": "asdfasdf"
}
}

View File

@ -0,0 +1 @@
{"address":"adaf150b905cf5e6a778e553e15a139b6618bbb7","crypto":{"cipher":"aes-128-ctr","ciphertext":"e6b4a87b62eca6b654b45ec806a19b5a8fa2ee1b39d2ca17406f11fb81428455","cipherparams":{"iv":"863515dcd60f8b9fec7d5ba59f2a895e"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"83e4d852dbd1d4bb93c1989736dd4cf4443b0842978a6eec3069a13406cb3605"},"mac":"7d00f96aeb1684717248fbcbdfec2d6da69dadea60e65239d20642e39097b923"},"id":"bc042f57-ad91-4459-a056-607ce80a760d","version":3,"whisperenabled":true,"extendedkey":{"cipher":"","ciphertext":"","cipherparams":{"iv":""},"kdf":"","kdfparams":null,"mac":""},"subaccountindex":0}

View File

@ -1 +1 @@
{"address":"adaf150b905cf5e6a778e553e15a139b6618bbb7","crypto":{"cipher":"aes-128-ctr","ciphertext":"e6b4a87b62eca6b654b45ec806a19b5a8fa2ee1b39d2ca17406f11fb81428455","cipherparams":{"iv":"863515dcd60f8b9fec7d5ba59f2a895e"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"83e4d852dbd1d4bb93c1989736dd4cf4443b0842978a6eec3069a13406cb3605"},"mac":"7d00f96aeb1684717248fbcbdfec2d6da69dadea60e65239d20642e39097b923"},"id":"bc042f57-ad91-4459-a056-607ce80a760d","version":3,"whisperenabled":true,"extendedkey":{"cipher":"","ciphertext":"","cipherparams":{"iv":""},"kdf":"","kdfparams":null,"mac":""},"subaccountindex":0}
{"address":"AdAf150b905Cf5E6A778E553E15A139B6618BbB7","crypto":{"cipher":"aes-128-ctr","ciphertext":"e6b4a87b62eca6b654b45ec806a19b5a8fa2ee1b39d2ca17406f11fb81428455","cipherparams":{"iv":"863515dcd60f8b9fec7d5ba59f2a895e"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"83e4d852dbd1d4bb93c1989736dd4cf4443b0842978a6eec3069a13406cb3605"},"mac":"7d00f96aeb1684717248fbcbdfec2d6da69dadea60e65239d20642e39097b923"},"id":"bc042f57-ad91-4459-a056-607ce80a760d","version":3,"whisperenabled":true,"extendedkey":{"cipher":"","ciphertext":"","cipherparams":{"iv":""},"kdf":"","kdfparams":null,"mac":""},"subaccountindex":0}

View File

@ -1 +1 @@
{"address":"65c01586aa0ce152835c788ace665e91ab3527b8","crypto":{"cipher":"aes-128-ctr","ciphertext":"26155f5c315492ccb40f24aeb34ff6bfbdbdcb7402b9e7bb558fa4c936be51ff","cipherparams":{"iv":"1d9b3043b59c3995c410f82cd96bfe05"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":4096,"p":6,"r":8,"salt":"4bb6d2080121e7ecc8b3dcba95cd0031cf3b36174612247c14486266580ed4b8"},"mac":"cc8f209d9128a4e9a836c691324e1459d4d3d2a2b0627cf94e99a7e8b305b6dc"},"id":"756c215e-d3af-435d-8fcf-39eb62d9a590","version":3,"whisperenabled":true,"extendedkey":{"cipher":"aes-128-ctr","ciphertext":"dd5cc47cde930bdb9a1625ff2efd230bac9a52e9d7e220aa699c017c4ac1ff645765a721ac6f4143ffa551ecf775f20b25e7cd5d3856ef59e7f31d407bf2d0404f2323919101c37c94952fb8462d4281a92b005e9f81a6dd1aff2f4eb15cb4c6a56df907c9591503c7d89b0cea08cb","cipherparams":{"iv":"f66b414b6678816bc3e5bb214624b4f7"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":4096,"p":6,"r":8,"salt":"094ba4f65bac68f8a9807871187dd915540013a0792bf3d11d019ae3cea7d7c0"},"mac":"2e2979943a75cff762480c26b3daff0fac78cdf12e0ed791e6a1321f3ed8fc91"},"subaccountindex":0}
{"address":"65C01586aa0Ce152835c788aCe665e91Ab3527b8","crypto":{"cipher":"aes-128-ctr","ciphertext":"26155f5c315492ccb40f24aeb34ff6bfbdbdcb7402b9e7bb558fa4c936be51ff","cipherparams":{"iv":"1d9b3043b59c3995c410f82cd96bfe05"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":4096,"p":6,"r":8,"salt":"4bb6d2080121e7ecc8b3dcba95cd0031cf3b36174612247c14486266580ed4b8"},"mac":"cc8f209d9128a4e9a836c691324e1459d4d3d2a2b0627cf94e99a7e8b305b6dc"},"id":"756c215e-d3af-435d-8fcf-39eb62d9a590","version":3,"whisperenabled":true,"extendedkey":{"cipher":"aes-128-ctr","ciphertext":"dd5cc47cde930bdb9a1625ff2efd230bac9a52e9d7e220aa699c017c4ac1ff645765a721ac6f4143ffa551ecf775f20b25e7cd5d3856ef59e7f31d407bf2d0404f2323919101c37c94952fb8462d4281a92b005e9f81a6dd1aff2f4eb15cb4c6a56df907c9591503c7d89b0cea08cb","cipherparams":{"iv":"f66b414b6678816bc3e5bb214624b4f7"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":4096,"p":6,"r":8,"salt":"094ba4f65bac68f8a9807871187dd915540013a0792bf3d11d019ae3cea7d7c0"},"mac":"2e2979943a75cff762480c26b3daff0fac78cdf12e0ed791e6a1321f3ed8fc91"},"subaccountindex":0}

View File

@ -15,11 +15,23 @@ matrix:
- go run build/ci.go install
- go run build/ci.go test -coverage
# These are the latest Go versions.
- os: linux
dist: trusty
sudo: required
go: 1.8.3
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage
# These are the latest Go versions.
- os: linux
dist: trusty
sudo: required
go: 1.9.0
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@ -29,7 +41,7 @@ matrix:
- go run build/ci.go test -coverage -misspell
- os: osx
go: 1.8.3
go: 1.9.0
sudo: required
script:
- brew update
@ -42,7 +54,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.3
go: 1.9.0
env:
- ubuntu-ppa
- azure-linux
@ -81,7 +93,7 @@ matrix:
sudo: required
services:
- docker
go: 1.8.3
go: 1.9.0
env:
- azure-linux-mips
script:
@ -121,7 +133,7 @@ matrix:
- azure-android
- maven-android
before_install:
- curl https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
@ -138,7 +150,7 @@ matrix:
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
go: 1.8.3
go: 1.9.0
env:
- azure-osx
- azure-ios
@ -164,7 +176,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.3
go: 1.9.0
env:
- azure-purge
script:

View File

@ -1,15 +1,16 @@
FROM alpine:3.5
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers
ADD . /go-ethereum
RUN \
apk add --update git go make gcc musl-dev linux-headers && \
(cd go-ethereum && make geth) && \
cp go-ethereum/build/bin/geth /usr/local/bin/ && \
apk del git go make gcc musl-dev linux-headers && \
rm -rf /go-ethereum && rm -rf /var/cache/apk/*
RUN cd /go-ethereum && make geth
EXPOSE 8545
EXPOSE 30303
EXPOSE 30303/udp
# Pull Geth into a second stage deploy alpine container
FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp
ENTRYPOINT ["geth"]

View File

@ -8,7 +8,7 @@
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
.PHONY: geth-windows geth-windows-386 geth-windows-amd64
GOBIN = build/bin
GOBIN = $(shell pwd)/build/bin
GO ?= latest
geth:
@ -21,11 +21,6 @@ swarm:
@echo "Done building."
@echo "Run \"$(GOBIN)/swarm\" to launch swarm."
evm:
build/env.sh go run build/ci.go install ./cmd/evm
@echo "Done building."
@echo "Run \"$(GOBIN)/evm\" to start the evm."
all:
build/env.sh go run build/ci.go install

View File

@ -35,11 +35,11 @@ The go-ethereum project comes with several wrappers/executables found in the `cm
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), an archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. See `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `disasm` | Bytecode disassembler to convert EVM (Ethereum Virtual Machine) bytecode into more user friendly assembly-like opcodes (e.g. `echo "6001" | disasm`). For details on the individual opcodes, please see pages 22-30 of the [Ethereum Yellow Paper](http://gavwood.com/paper.pdf). |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. See `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
## Running geth
@ -116,7 +116,7 @@ To get an idea how the file should look like you can use the `dumpconfig` subcom
$ geth --your-favourite-flags dumpconfig
```
*Note: This works only with geth v1.6.0 and above*
*Note: This works only with geth v1.6.0 and above.*
#### Docker quick start

View File

@ -1 +1 @@
1.6.7
1.7.0

View File

@ -33,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
)
@ -61,7 +60,7 @@ func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
database, _ := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllProtocolChanges, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), vm.Config{})
backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config}
backend.rollback()
return backend
@ -144,7 +143,8 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
// TransactionReceipt returns the receipt of a transaction.
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
return core.GetReceipt(b.database, txHash), nil
receipt, _, _, _ := core.GetReceipt(b.database, txHash)
return receipt, nil
}
// PendingCodeAt returns the code associated with an account in the pending state.
@ -253,7 +253,8 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
gaspool := new(core.GasPool).AddGas(math.MaxBig256)
ret, gasUsed, _, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
// TODO utilize returned failed flag to help gas estimation.
ret, gasUsed, _, _, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
return ret, gasUsed, err
}

View File

@ -122,7 +122,7 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
}
// For Go bindings pass the code through goimports to clean it up and double check
if lang == LangGo {
code, err := imports.Process("", buffer.Bytes(), nil)
code, err := imports.Process(".", buffer.Bytes(), nil)
if err != nil {
return "", fmt.Errorf("%v\n%s", err, buffer)
}
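Passing "." instead of an empty filename gives goimports a concrete directory to resolve import paths against (presumably so vendored packages are found). A minimal sketch of the same call shape, assuming golang.org/x/tools/imports (nil options select the defaults):

package main

import (
	"fmt"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte("package demo\n\nfunc hello() { fmt.Println(\"hi\") }\n")
	// Process gofmts the source and inserts the missing "fmt" import;
	// the filename argument ("." here, as in the diff) determines the
	// directory used to resolve import paths.
	out, err := imports.Process(".", src, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}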

View File

@ -52,8 +52,8 @@ var tmplSource = map[Lang]string{
// tmplSourceGo is the Go source template that the contract binding is
// generated from.
const tmplSourceGo = `
// This file is an automatically generated Go binding. Do not modify as any
// change will likely be lost upon the next re-generation!
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package {{.Package}}

View File

@ -42,8 +42,9 @@ type Wallet interface {
URL() URL
// Status returns a textual status to aid the user in the current state of the
// wallet.
Status() string
// wallet. It also returns an error indicating any failure the wallet might have
// encountered.
Status() (string, error)
// Open initializes access to a wallet instance. It is not meant to unlock or
// decrypt account keys, rather simply to establish a connection to hardware
@ -147,9 +148,26 @@ type Backend interface {
Subscribe(sink chan<- WalletEvent) event.Subscription
}
// WalletEventType represents the different event types that can be fired by
// the wallet subscription subsystem.
type WalletEventType int
const (
// WalletArrived is fired when a new wallet is detected either via USB or via
// a filesystem event in the keystore.
WalletArrived WalletEventType = iota
// WalletOpened is fired when a wallet is successfully opened with the purpose
// of starting any background processes such as automatic key derivation.
WalletOpened
// WalletDropped is fired when a wallet is removed or disconnected, either
// via USB or due to a filesystem event in the keystore.
WalletDropped
)
// WalletEvent is an event fired by an account backend when a wallet arrival or
// departure is detected.
type WalletEvent struct {
Wallet Wallet // Wallet instance arrived or departed
Arrive bool // Whether the wallet was added or removed
Wallet Wallet // Wallet instance arrived or departed
Kind WalletEventType // Event type that happened in the system
}
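WalletEvent now carries a typed Kind instead of the old Arrive boolean, so consumers distinguish arrivals, opens and drops with a switch; the Manager.update loop further down does exactly that. A minimal subscriber sketch against the new shape (watchWallets is a hypothetical helper; any accounts.Backend implementation can be passed in):

package walletwatch

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

// watchWallets drains wallet events from a backend and reacts to the
// typed Kind field introduced by this change.
func watchWallets(backend accounts.Backend) {
	sink := make(chan accounts.WalletEvent, 16)
	sub := backend.Subscribe(sink)
	defer sub.Unsubscribe()

	for event := range sink {
		switch event.Kind {
		case accounts.WalletArrived:
			fmt.Println("wallet arrived:", event.Wallet.URL())
		case accounts.WalletDropped:
			fmt.Println("wallet dropped:", event.Wallet.URL())
		}
	}
}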

View File

@ -27,12 +27,17 @@ import (
// DefaultRootDerivationPath is the root path to which custom derivation endpoints
// are appended. As such, the first account will be at m/44'/60'/0'/0, the second
// at m/44'/60'/0'/1, etc.
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0}
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
// DefaultBaseDerivationPath is the base path from which custom derivation endpoints
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
// at m/44'/60'/0'/1, etc.
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
// DefaultLedgerBaseDerivationPath is the base path from which custom derivation endpoints
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
// at m/44'/60'/0'/1, etc.
var DefaultLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
// DerivationPath represents the computer friendly version of a hierarchical
// deterministic wallet account derivation path.
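The 0x80000000 offset in these paths is the BIP-32 hardened-key marker, so DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0} reads as m/44'/60'/0'/0/0. A small sketch rendering that notation by hand, without relying on any geth helper:

package main

import "fmt"

func main() {
	// DefaultBaseDerivationPath from the diff: m/44'/60'/0'/0/0.
	path := []uint32{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}

	out := "m"
	for _, component := range path {
		if component >= 0x80000000 { // hardened components carry the 2^31 offset
			out += fmt.Sprintf("/%d'", component-0x80000000)
		} else {
			out += fmt.Sprintf("/%d", component)
		}
	}
	fmt.Println(out) // m/44'/60'/0'/0/0
}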

View File

@ -47,8 +47,6 @@ type Key struct {
// we only store privkey as pubkey/address can be derived from it
// privkey in this struct is always in plaintext
PrivateKey *ecdsa.PrivateKey
// when enabled, the key will be used as a Whisper identity
WhisperEnabled bool
// extended key is the root node for new hardened children i.e. sub-accounts
ExtendedKey *extkeys.ExtendedKey
// next index to be used for sub-account child derivation
@ -76,7 +74,6 @@ type encryptedKeyJSONV3 struct {
Crypto cryptoJSON `json:"crypto"`
Id string `json:"id"`
Version int `json:"version"`
WhisperEnabled bool `json:"whisperenabled"`
ExtendedKey cryptoJSON `json:"extendedkey"`
SubAccountIndex uint32 `json:"subaccountindex"`
}
@ -101,14 +98,6 @@ type cipherparamsJSON struct {
IV string `json:"iv"`
}
type scryptParamsJSON struct {
N int `json:"n"`
R int `json:"r"`
P int `json:"p"`
DkLen int `json:"dklen"`
Salt string `json:"salt"`
}
func (k *Key) MarshalJSON() (j []byte, err error) {
jStruct := plainKeyJSON{
hex.EncodeToString(k.Address[:]),
@ -181,11 +170,10 @@ func newKeyFromExtendedKey(extKey *extkeys.ExtendedKey) (*Key, error) {
privateKeyECDSA := extChild1.ToECDSA()
id := uuid.NewRandom()
key := &Key{
Id: id,
Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey),
PrivateKey: privateKeyECDSA,
WhisperEnabled: true,
ExtendedKey: extChild2,
Id: id,
Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey),
PrivateKey: privateKeyECDSA,
ExtendedKey: extChild2,
}
return key, nil
}
@ -219,12 +207,11 @@ func newKey(rand io.Reader) (*Key, error) {
return newKeyFromECDSA(privateKeyECDSA), nil
}
func storeNewKey(ks keyStore, rand io.Reader, auth string, whisperEnabled bool) (*Key, accounts.Account, error) {
func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Account, error) {
key, err := newKey(rand)
if err != nil {
return nil, accounts.Account{}, err
}
key.WhisperEnabled = whisperEnabled
a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}}
if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
zeroKey(key.PrivateKey)

View File

@ -144,14 +144,14 @@ func (ks *KeyStore) refreshWallets() {
for _, account := range accs {
// Drop wallets while they were in front of the next account
for len(ks.wallets) > 0 && ks.wallets[0].URL().Cmp(account.URL) < 0 {
events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Arrive: false})
events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Kind: accounts.WalletDropped})
ks.wallets = ks.wallets[1:]
}
// If there are no more wallets or the account is before the next, wrap new wallet
if len(ks.wallets) == 0 || ks.wallets[0].URL().Cmp(account.URL) > 0 {
wallet := &keystoreWallet{account: account, keystore: ks}
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
wallets = append(wallets, wallet)
continue
}
@ -164,7 +164,7 @@ func (ks *KeyStore) refreshWallets() {
}
// Drop any leftover wallets and set the new batch
for _, wallet := range ks.wallets {
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: false})
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
}
ks.wallets = wallets
ks.mu.Unlock()
@ -411,8 +411,8 @@ func (ks *KeyStore) expire(addr common.Address, u *unlocked, timeout time.Durati
// NewAccount generates a new key and stores it into the key directory,
// encrypting it with the passphrase.
func (ks *KeyStore) NewAccount(passphrase string, whisperEnabled bool) (accounts.Account, error) {
_, account, err := storeNewKey(ks.storage, crand.Reader, passphrase, whisperEnabled)
func (ks *KeyStore) NewAccount(passphrase string) (accounts.Account, error) {
_, account, err := storeNewKey(ks.storage, crand.Reader, passphrase)
if err != nil {
return accounts.Account{}, err
}

View File

@ -141,7 +141,7 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
Cipher: "aes-128-ctr",
CipherText: hex.EncodeToString(cipherText),
CipherParams: cipherParamsJSON,
KDF: "scrypt",
KDF: keyHeaderKDF,
KDFParams: scryptParamsJSON,
MAC: hex.EncodeToString(mac),
}
@ -154,7 +154,6 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
cryptoStruct,
key.Id.String(),
version,
key.WhisperEnabled,
encryptedExtendedKey,
key.SubAccountIndex,
}
@ -212,7 +211,6 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
// Depending on the version try to parse one way or another
var (
keyBytes, keyId []byte
whisperEnabled bool
err error
extKeyBytes []byte
extKey *extkeys.ExtendedKey
@ -251,10 +249,6 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
extKey, err = extkeys.NewKeyFromString(string(extKeyBytes))
}
whisperEnabled, ok = m["whisperenabled"].(bool)
if !ok {
whisperEnabled = false
}
// Handle any decryption errors and return the key
if err != nil {
return nil, err
@ -265,7 +259,6 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
Id: uuid.UUID(keyId),
Address: crypto.PubkeyToAddress(key.PublicKey),
PrivateKey: key,
WhisperEnabled: whisperEnabled,
ExtendedKey: extKey,
SubAccountIndex: uint32(subAccountIndex),
}, nil
@ -400,7 +393,7 @@ func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
}
dkLen := ensureInt(cryptoJSON.KDFParams["dklen"])
if cryptoJSON.KDF == "scrypt" {
if cryptoJSON.KDF == keyHeaderKDF {
n := ensureInt(cryptoJSON.KDFParams["n"])
r := ensureInt(cryptoJSON.KDFParams["r"])
p := ensureInt(cryptoJSON.KDFParams["p"])

View File

@ -36,16 +36,16 @@ func (w *keystoreWallet) URL() accounts.URL {
return w.account.URL
}
// Status implements accounts.Wallet, always returning "open", since there is no
// concept of open/close for plain keystore accounts.
func (w *keystoreWallet) Status() string {
// Status implements accounts.Wallet, returning whether the account held by the
// keystore wallet is unlocked or not.
func (w *keystoreWallet) Status() (string, error) {
w.keystore.mu.RLock()
defer w.keystore.mu.RUnlock()
if _, ok := w.keystore.unlocked[w.account.Address]; ok {
return "Unlocked"
return "Unlocked", nil
}
return "Locked"
return "Locked", nil
}
// Open implements accounts.Wallet, but is a noop for plain wallets since there

View File

@ -96,9 +96,10 @@ func (am *Manager) update() {
case event := <-am.updates:
// Wallet event arrived, update local cache
am.lock.Lock()
if event.Arrive {
switch event.Kind {
case WalletArrived:
am.wallets = merge(am.wallets, event.Wallet)
} else {
case WalletDropped:
am.wallets = drop(am.wallets, event.Wallet)
}
am.lock.Unlock()

View File

@ -14,10 +14,6 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// This file contains the implementation for interacting with the Ledger hardware
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
package usbwallet
import (
@ -33,24 +29,28 @@ import (
)
// LedgerScheme is the protocol scheme prefixing account and wallet URLs.
var LedgerScheme = "ledger"
const LedgerScheme = "ledger"
// ledgerDeviceIDs are the known device IDs that Ledger wallets use.
var ledgerDeviceIDs = []deviceID{
{Vendor: 0x2c97, Product: 0x0000}, // Ledger Blue
{Vendor: 0x2c97, Product: 0x0001}, // Ledger Nano S
}
// TrezorScheme is the protocol scheme prefixing account and wallet URLs.
const TrezorScheme = "trezor"
// Maximum time between wallet refreshes (if USB hotplug notifications don't work).
const ledgerRefreshCycle = time.Second
// refreshCycle is the maximum time between wallet refreshes (if USB hotplug
// notifications don't work).
const refreshCycle = time.Second
// Minimum time between wallet refreshes to avoid USB trashing.
const ledgerRefreshThrottling = 500 * time.Millisecond
// refreshThrottling is the minimum time between wallet refreshes to avoid USB
// trashing.
const refreshThrottling = 500 * time.Millisecond
// Hub is an accounts.Backend that can find and handle generic USB hardware wallets.
type Hub struct {
scheme string // Protocol scheme prefixing account and wallet URLs.
vendorID uint16 // USB vendor identifier used for device discovery
productIDs []uint16 // USB product identifiers used for device discovery
makeDriver func(log.Logger) driver // Factory method to construct a vendor specific driver
// LedgerHub is an accounts.Backend that can find and handle Ledger hardware wallets.
type LedgerHub struct {
refreshed time.Time // Time instance when the list of wallets was last refreshed
wallets []accounts.Wallet // List of Ledger devices currently tracking
wallets []accounts.Wallet // List of USB wallet devices currently tracking
updateFeed event.Feed // Event feed to notify wallet additions/removals
updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
updating bool // Whether the event notification loop is running
@ -65,20 +65,34 @@ type LedgerHub struct {
}
// NewLedgerHub creates a new hardware wallet manager for Ledger devices.
func NewLedgerHub() (*LedgerHub, error) {
func NewLedgerHub() (*Hub, error) {
return newHub(LedgerScheme, 0x2c97, []uint16{0x0000 /* Ledger Blue */, 0x0001 /* Ledger Nano S */}, newLedgerDriver)
}
// NewTrezorHub creates a new hardware wallet manager for Trezor devices.
func NewTrezorHub() (*Hub, error) {
return newHub(TrezorScheme, 0x534c, []uint16{0x0001 /* Trezor 1 */}, newTrezorDriver)
}
// newHub creates a new hardware wallet manager for generic USB devices.
func newHub(scheme string, vendorID uint16, productIDs []uint16, makeDriver func(log.Logger) driver) (*Hub, error) {
if !hid.Supported() {
return nil, errors.New("unsupported platform")
}
hub := &LedgerHub{
quit: make(chan chan error),
hub := &Hub{
scheme: scheme,
vendorID: vendorID,
productIDs: productIDs,
makeDriver: makeDriver,
quit: make(chan chan error),
}
hub.refreshWallets()
return hub, nil
}
// Wallets implements accounts.Backend, returning all the currently tracked USB
// devices that appear to be Ledger hardware wallets.
func (hub *LedgerHub) Wallets() []accounts.Wallet {
// devices that appear to be hardware wallets.
func (hub *Hub) Wallets() []accounts.Wallet {
// Make sure the list of wallets is up to date
hub.refreshWallets()
@ -92,17 +106,17 @@ func (hub *LedgerHub) Wallets() []accounts.Wallet {
// refreshWallets scans the USB devices attached to the machine and updates the
// list of wallets based on the found devices.
func (hub *LedgerHub) refreshWallets() {
func (hub *Hub) refreshWallets() {
// Don't scan the USB like crazy if the user fetches wallets in a loop
hub.stateLock.RLock()
elapsed := time.Since(hub.refreshed)
hub.stateLock.RUnlock()
if elapsed < ledgerRefreshThrottling {
if elapsed < refreshThrottling {
return
}
// Retrieve the current list of Ledger devices
var ledgers []hid.DeviceInfo
// Retrieve the current list of USB wallet devices
var devices []hid.DeviceInfo
if runtime.GOOS == "linux" {
// hidapi on Linux opens the device during enumeration to retrieve some infos,
@ -117,10 +131,10 @@ func (hub *LedgerHub) refreshWallets() {
return
}
}
for _, info := range hid.Enumerate(0, 0) { // Can't enumerate directly, one valid ID is the 0 wildcard
for _, id := range ledgerDeviceIDs {
if info.VendorID == id.Vendor && info.ProductID == id.Product {
ledgers = append(ledgers, info)
for _, info := range hid.Enumerate(hub.vendorID, 0) {
for _, id := range hub.productIDs {
if info.ProductID == id && info.Interface == 0 {
devices = append(devices, info)
break
}
}
@ -132,22 +146,29 @@ func (hub *LedgerHub) refreshWallets() {
// Transform the current list of wallets into the new one
hub.stateLock.Lock()
wallets := make([]accounts.Wallet, 0, len(ledgers))
wallets := make([]accounts.Wallet, 0, len(devices))
events := []accounts.WalletEvent{}
for _, ledger := range ledgers {
url := accounts.URL{Scheme: LedgerScheme, Path: ledger.Path}
for _, device := range devices {
url := accounts.URL{Scheme: hub.scheme, Path: device.Path}
// Drop wallets in front of the next device or those that failed for some reason
for len(hub.wallets) > 0 && (hub.wallets[0].URL().Cmp(url) < 0 || hub.wallets[0].(*ledgerWallet).failed()) {
events = append(events, accounts.WalletEvent{Wallet: hub.wallets[0], Arrive: false})
for len(hub.wallets) > 0 {
// Abort if we're past the current device and found an operational one
_, failure := hub.wallets[0].Status()
if hub.wallets[0].URL().Cmp(url) >= 0 || failure == nil {
break
}
// Drop the stale and failed devices
events = append(events, accounts.WalletEvent{Wallet: hub.wallets[0], Kind: accounts.WalletDropped})
hub.wallets = hub.wallets[1:]
}
// If there are no more wallets or the device is before the next, wrap new wallet
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
wallet := &ledgerWallet{hub: hub, url: &url, info: ledger, log: log.New("url", url)}
logger := log.New("url", url)
wallet := &wallet{hub: hub, driver: hub.makeDriver(logger), url: &url, info: device, log: logger}
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
wallets = append(wallets, wallet)
continue
}
@ -160,7 +181,7 @@ func (hub *LedgerHub) refreshWallets() {
}
// Drop any leftover wallets and set the new batch
for _, wallet := range hub.wallets {
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: false})
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
}
hub.refreshed = time.Now()
hub.wallets = wallets
@ -173,8 +194,8 @@ func (hub *LedgerHub) refreshWallets() {
}
// Subscribe implements accounts.Backend, creating an async subscription to
// receive notifications on the addition or removal of Ledger wallets.
func (hub *LedgerHub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
// receive notifications on the addition or removal of USB wallets.
func (hub *Hub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
// We need the mutex to reliably start/stop the update loop
hub.stateLock.Lock()
defer hub.stateLock.Unlock()
@ -190,18 +211,14 @@ func (hub *LedgerHub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscrip
return sub
}
// updater is responsible for maintaining an up-to-date list of wallets stored in
// the keystore, and for firing wallet addition/removal events. It listens for
// account change events from the underlying account cache, and also periodically
// forces a manual refresh (only triggers for systems where the filesystem notifier
// is not running).
func (hub *LedgerHub) updater() {
// updater is responsible for maintaining an up-to-date list of wallets managed
// by the USB hub, and for firing wallet addition/removal events.
func (hub *Hub) updater() {
for {
// Wait for a USB hotplug event (not supported yet) or a refresh timeout
select {
//case <-hub.changes: // re-enable on hotplug implementation
case <-time.After(ledgerRefreshCycle):
}
// TODO: Wait for a USB hotplug event (not supported yet) or a refresh timeout
// <-hub.changes
time.Sleep(refreshCycle)
// Run the wallet refresher
hub.refreshWallets()

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,903 @@
// This file originates from the SatoshiLabs Trezor `common` repository at:
// https://github.com/trezor/trezor-common/blob/master/protob/messages.proto
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
/**
* Messages for TREZOR communication
*/
// Sugar for easier handling in Java
option java_package = "com.satoshilabs.trezor.lib.protobuf";
option java_outer_classname = "TrezorMessage";
import "types.proto";
/**
* Mapping between Trezor wire identifier (uint) and a protobuf message
*/
enum MessageType {
MessageType_Initialize = 0 [(wire_in) = true];
MessageType_Ping = 1 [(wire_in) = true];
MessageType_Success = 2 [(wire_out) = true];
MessageType_Failure = 3 [(wire_out) = true];
MessageType_ChangePin = 4 [(wire_in) = true];
MessageType_WipeDevice = 5 [(wire_in) = true];
MessageType_FirmwareErase = 6 [(wire_in) = true, (wire_bootloader) = true];
MessageType_FirmwareUpload = 7 [(wire_in) = true, (wire_bootloader) = true];
MessageType_FirmwareRequest = 8 [(wire_out) = true, (wire_bootloader) = true];
MessageType_GetEntropy = 9 [(wire_in) = true];
MessageType_Entropy = 10 [(wire_out) = true];
MessageType_GetPublicKey = 11 [(wire_in) = true];
MessageType_PublicKey = 12 [(wire_out) = true];
MessageType_LoadDevice = 13 [(wire_in) = true];
MessageType_ResetDevice = 14 [(wire_in) = true];
MessageType_SignTx = 15 [(wire_in) = true];
MessageType_SimpleSignTx = 16 [(wire_in) = true, deprecated = true];
MessageType_Features = 17 [(wire_out) = true];
MessageType_PinMatrixRequest = 18 [(wire_out) = true];
MessageType_PinMatrixAck = 19 [(wire_in) = true, (wire_tiny) = true];
MessageType_Cancel = 20 [(wire_in) = true];
MessageType_TxRequest = 21 [(wire_out) = true];
MessageType_TxAck = 22 [(wire_in) = true];
MessageType_CipherKeyValue = 23 [(wire_in) = true];
MessageType_ClearSession = 24 [(wire_in) = true];
MessageType_ApplySettings = 25 [(wire_in) = true];
MessageType_ButtonRequest = 26 [(wire_out) = true];
MessageType_ButtonAck = 27 [(wire_in) = true, (wire_tiny) = true];
MessageType_ApplyFlags = 28 [(wire_in) = true];
MessageType_GetAddress = 29 [(wire_in) = true];
MessageType_Address = 30 [(wire_out) = true];
MessageType_SelfTest = 32 [(wire_in) = true, (wire_bootloader) = true];
MessageType_BackupDevice = 34 [(wire_in) = true];
MessageType_EntropyRequest = 35 [(wire_out) = true];
MessageType_EntropyAck = 36 [(wire_in) = true];
MessageType_SignMessage = 38 [(wire_in) = true];
MessageType_VerifyMessage = 39 [(wire_in) = true];
MessageType_MessageSignature = 40 [(wire_out) = true];
MessageType_PassphraseRequest = 41 [(wire_out) = true];
MessageType_PassphraseAck = 42 [(wire_in) = true, (wire_tiny) = true];
MessageType_EstimateTxSize = 43 [(wire_in) = true, deprecated = true];
MessageType_TxSize = 44 [(wire_out) = true, deprecated = true];
MessageType_RecoveryDevice = 45 [(wire_in) = true];
MessageType_WordRequest = 46 [(wire_out) = true];
MessageType_WordAck = 47 [(wire_in) = true];
MessageType_CipheredKeyValue = 48 [(wire_out) = true];
MessageType_EncryptMessage = 49 [(wire_in) = true, deprecated = true];
MessageType_EncryptedMessage = 50 [(wire_out) = true, deprecated = true];
MessageType_DecryptMessage = 51 [(wire_in) = true, deprecated = true];
MessageType_DecryptedMessage = 52 [(wire_out) = true, deprecated = true];
MessageType_SignIdentity = 53 [(wire_in) = true];
MessageType_SignedIdentity = 54 [(wire_out) = true];
MessageType_GetFeatures = 55 [(wire_in) = true];
MessageType_EthereumGetAddress = 56 [(wire_in) = true];
MessageType_EthereumAddress = 57 [(wire_out) = true];
MessageType_EthereumSignTx = 58 [(wire_in) = true];
MessageType_EthereumTxRequest = 59 [(wire_out) = true];
MessageType_EthereumTxAck = 60 [(wire_in) = true];
MessageType_GetECDHSessionKey = 61 [(wire_in) = true];
MessageType_ECDHSessionKey = 62 [(wire_out) = true];
MessageType_SetU2FCounter = 63 [(wire_in) = true];
MessageType_EthereumSignMessage = 64 [(wire_in) = true];
MessageType_EthereumVerifyMessage = 65 [(wire_in) = true];
MessageType_EthereumMessageSignature = 66 [(wire_out) = true];
MessageType_DebugLinkDecision = 100 [(wire_debug_in) = true, (wire_tiny) = true];
MessageType_DebugLinkGetState = 101 [(wire_debug_in) = true];
MessageType_DebugLinkState = 102 [(wire_debug_out) = true];
MessageType_DebugLinkStop = 103 [(wire_debug_in) = true];
MessageType_DebugLinkLog = 104 [(wire_debug_out) = true];
MessageType_DebugLinkMemoryRead = 110 [(wire_debug_in) = true];
MessageType_DebugLinkMemory = 111 [(wire_debug_out) = true];
MessageType_DebugLinkMemoryWrite = 112 [(wire_debug_in) = true];
MessageType_DebugLinkFlashErase = 113 [(wire_debug_in) = true];
}
////////////////////
// Basic messages //
////////////////////
/**
* Request: Reset device to default state and ask for device details
* @next Features
*/
message Initialize {
}
/**
* Request: Ask for device details (no device reset)
* @next Features
*/
message GetFeatures {
}
/**
* Response: Reports various information about the device
* @prev Initialize
* @prev GetFeatures
*/
message Features {
optional string vendor = 1; // name of the manufacturer, e.g. "bitcointrezor.com"
optional uint32 major_version = 2; // major version of the device, e.g. 1
optional uint32 minor_version = 3; // minor version of the device, e.g. 0
optional uint32 patch_version = 4; // patch version of the device, e.g. 0
optional bool bootloader_mode = 5; // is device in bootloader mode?
optional string device_id = 6; // device's unique identifier
optional bool pin_protection = 7; // is device protected by PIN?
optional bool passphrase_protection = 8; // is node/mnemonic encrypted using passphrase?
optional string language = 9; // device language
optional string label = 10; // device description label
repeated CoinType coins = 11; // supported coins
optional bool initialized = 12; // does device contain seed?
optional bytes revision = 13; // SCM revision of firmware
optional bytes bootloader_hash = 14; // hash of the bootloader
optional bool imported = 15; // was storage imported from an external source?
optional bool pin_cached = 16; // is PIN already cached in session?
optional bool passphrase_cached = 17; // is passphrase already cached in session?
optional bool firmware_present = 18; // is valid firmware loaded?
optional bool needs_backup = 19; // does storage need backup? (equals to Storage.needs_backup)
optional uint32 flags = 20; // device flags (equals to Storage.flags)
}
/**
* Request: clear session (removes cached PIN, passphrase, etc).
* @next Success
*/
message ClearSession {
}
/**
* Request: change language and/or label of the device
* @next Success
* @next Failure
* @next ButtonRequest
* @next PinMatrixRequest
*/
message ApplySettings {
optional string language = 1;
optional string label = 2;
optional bool use_passphrase = 3;
optional bytes homescreen = 4;
}
/**
* Request: set flags of the device
* @next Success
* @next Failure
*/
message ApplyFlags {
optional uint32 flags = 1; // bitmask, can only set bits, not unset
}
/**
* Request: Starts workflow for setting/changing/removing the PIN
* @next ButtonRequest
* @next PinMatrixRequest
*/
message ChangePin {
optional bool remove = 1; // is PIN removal requested?
}
/**
* Request: Test if the device is alive, device sends back the message in Success response
* @next Success
*/
message Ping {
optional string message = 1; // message to send back in Success message
optional bool button_protection = 2; // ask for button press
optional bool pin_protection = 3; // ask for PIN if set in device
optional bool passphrase_protection = 4; // ask for passphrase if set in device
}
/**
* Response: Success of the previous request
*/
message Success {
optional string message = 1; // human readable description of action or request-specific payload
}
/**
* Response: Failure of the previous request
*/
message Failure {
optional FailureType code = 1; // computer-readable definition of the error state
optional string message = 2; // human-readable message of the error state
}
/**
* Response: Device is waiting for HW button press.
* @next ButtonAck
* @next Cancel
*/
message ButtonRequest {
optional ButtonRequestType code = 1;
optional string data = 2;
}
/**
* Request: Computer agrees to wait for HW button press
* @prev ButtonRequest
*/
message ButtonAck {
}
/**
* Response: Device is asking computer to show PIN matrix and awaits PIN encoded using this matrix scheme
* @next PinMatrixAck
* @next Cancel
*/
message PinMatrixRequest {
optional PinMatrixRequestType type = 1;
}
/**
* Request: Computer responds with encoded PIN
* @prev PinMatrixRequest
*/
message PinMatrixAck {
required string pin = 1; // matrix encoded PIN entered by user
}
/**
* Request: Abort last operation that required user interaction
* @prev ButtonRequest
* @prev PinMatrixRequest
* @prev PassphraseRequest
*/
message Cancel {
}
/**
* Response: Device awaits encryption passphrase
* @next PassphraseAck
* @next Cancel
*/
message PassphraseRequest {
}
/**
* Request: Send passphrase back
* @prev PassphraseRequest
*/
message PassphraseAck {
required string passphrase = 1;
}
/**
* Request: Request a sample of random data generated by hardware RNG. May be used for testing.
* @next ButtonRequest
* @next Entropy
* @next Failure
*/
message GetEntropy {
required uint32 size = 1; // size of requested entropy
}
/**
* Response: Reply with random data generated by internal RNG
* @prev GetEntropy
*/
message Entropy {
required bytes entropy = 1; // stream of random generated bytes
}
/**
* Request: Ask device for public key corresponding to address_n path
* @next PassphraseRequest
* @next PublicKey
* @next Failure
*/
message GetPublicKey {
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
optional string ecdsa_curve_name = 2; // ECDSA curve name to use
optional bool show_display = 3; // optionally show on display before sending the result
optional string coin_name = 4 [default='Bitcoin'];
}
/**
* Response: Contains public key derived from device private seed
* @prev GetPublicKey
*/
message PublicKey {
required HDNodeType node = 1; // BIP32 public node
optional string xpub = 2; // serialized form of public node
}
/**
* Request: Ask device for address corresponding to address_n path
* @next PassphraseRequest
* @next Address
* @next Failure
*/
message GetAddress {
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
optional string coin_name = 2 [default='Bitcoin'];
optional bool show_display = 3; // optionally show on display before sending the result
optional MultisigRedeemScriptType multisig = 4; // filled if we are showing a multisig address
optional InputScriptType script_type = 5 [default=SPENDADDRESS]; // used to distinguish between various address formats (non-segwit, segwit, etc.)
}
/**
* Request: Ask device for Ethereum address corresponding to address_n path
* @next PassphraseRequest
* @next EthereumAddress
* @next Failure
*/
message EthereumGetAddress {
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
optional bool show_display = 2; // optionally show on display before sending the result
}
/**
* Response: Contains address derived from device private seed
* @prev GetAddress
*/
message Address {
required string address = 1; // Coin address in Base58 encoding
}
/**
* Response: Contains an Ethereum address derived from device private seed
* @prev EthereumGetAddress
*/
message EthereumAddress {
required bytes address = 1; // Coin address as an Ethereum 160 bit hash
}
/**
* Request: Request device to wipe all sensitive data and settings
* @next ButtonRequest
*/
message WipeDevice {
}
/**
* Request: Load seed and related internal settings from the computer
* @next ButtonRequest
* @next Success
* @next Failure
*/
message LoadDevice {
optional string mnemonic = 1; // seed encoded as BIP-39 mnemonic (12, 18 or 24 words)
optional HDNodeType node = 2; // BIP-32 node
optional string pin = 3; // set PIN protection
optional bool passphrase_protection = 4; // enable master node encryption using passphrase
optional string language = 5 [default='english']; // device language
optional string label = 6; // device label
optional bool skip_checksum = 7; // do not test mnemonic for valid BIP-39 checksum
optional uint32 u2f_counter = 8; // U2F counter
}
/**
* Request: Ask device to do initialization involving user interaction
* @next EntropyRequest
* @next Failure
*/
message ResetDevice {
optional bool display_random = 1; // display entropy generated by the device before asking for additional entropy
optional uint32 strength = 2 [default=256]; // strength of seed in bits
optional bool passphrase_protection = 3; // enable master node encryption using passphrase
optional bool pin_protection = 4; // enable PIN protection
optional string language = 5 [default='english']; // device language
optional string label = 6; // device label
optional uint32 u2f_counter = 7; // U2F counter
optional bool skip_backup = 8; // postpone seed backup to BackupDevice workflow
}
/**
* Request: Perform backup of the device seed if not backed up using ResetDevice
* @next ButtonRequest
*/
message BackupDevice {
}
/**
* Response: Ask for additional entropy from host computer
* @prev ResetDevice
* @next EntropyAck
*/
message EntropyRequest {
}
/**
* Request: Provide additional entropy for seed generation function
* @prev EntropyRequest
* @next ButtonRequest
*/
message EntropyAck {
optional bytes entropy = 1; // 256 bits (32 bytes) of random data
}
/**
* Request: Start recovery workflow asking user for specific words of mnemonic
 * Used to recover the device safely even on an untrusted computer.
* @next WordRequest
*/
message RecoveryDevice {
optional uint32 word_count = 1; // number of words in BIP-39 mnemonic
optional bool passphrase_protection = 2; // enable master node encryption using passphrase
optional bool pin_protection = 3; // enable PIN protection
optional string language = 4 [default='english']; // device language
optional string label = 5; // device label
optional bool enforce_wordlist = 6; // enforce BIP-39 wordlist during the process
// 7 reserved for unused recovery method
optional uint32 type = 8; // supported recovery type (see RecoveryDeviceType)
optional uint32 u2f_counter = 9; // U2F counter
optional bool dry_run = 10; // perform dry-run recovery workflow (for safe mnemonic validation)
}
/**
* Response: Device is waiting for user to enter word of the mnemonic
 * Its position is shown only on the device's internal display.
* @prev RecoveryDevice
* @prev WordAck
*/
message WordRequest {
optional WordRequestType type = 1;
}
/**
* Request: Computer replies with word from the mnemonic
* @prev WordRequest
* @next WordRequest
* @next Success
* @next Failure
*/
message WordAck {
required string word = 1; // one word of mnemonic on asked position
}
//////////////////////////////
// Message signing messages //
//////////////////////////////
/**
* Request: Ask device to sign message
* @next MessageSignature
* @next Failure
*/
message SignMessage {
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
required bytes message = 2; // message to be signed
optional string coin_name = 3 [default='Bitcoin']; // coin to use for signing
optional InputScriptType script_type = 4 [default=SPENDADDRESS]; // used to distinguish between various address formats (non-segwit, segwit, etc.)
}
/**
* Request: Ask device to verify message
* @next Success
* @next Failure
*/
message VerifyMessage {
optional string address = 1; // address to verify
optional bytes signature = 2; // signature to verify
optional bytes message = 3; // message to verify
optional string coin_name = 4 [default='Bitcoin']; // coin to use for verifying
}
/**
* Response: Signed message
* @prev SignMessage
*/
message MessageSignature {
optional string address = 1; // address used to sign the message
optional bytes signature = 2; // signature of the message
}
///////////////////////////
// Encryption/decryption //
///////////////////////////
/**
* Request: Ask device to encrypt message
* @next EncryptedMessage
* @next Failure
*/
message EncryptMessage {
optional bytes pubkey = 1; // public key
optional bytes message = 2; // message to encrypt
optional bool display_only = 3; // show just on display? (don't send back via wire)
repeated uint32 address_n = 4; // BIP-32 path to derive the signing key from master node
optional string coin_name = 5 [default='Bitcoin']; // coin to use for signing
}
/**
* Response: Encrypted message
* @prev EncryptMessage
*/
message EncryptedMessage {
optional bytes nonce = 1; // nonce used during encryption
optional bytes message = 2; // encrypted message
optional bytes hmac = 3; // message hmac
}
/**
* Request: Ask device to decrypt message
* @next Success
* @next Failure
*/
message DecryptMessage {
repeated uint32 address_n = 1; // BIP-32 path to derive the decryption key from master node
optional bytes nonce = 2; // nonce used during encryption
optional bytes message = 3; // message to decrypt
optional bytes hmac = 4; // message hmac
}
/**
* Response: Decrypted message
 * @prev DecryptMessage
*/
message DecryptedMessage {
optional bytes message = 1; // decrypted message
optional string address = 2; // address used to sign the message (if used)
}
/**
* Request: Ask device to encrypt or decrypt value of given key
* @next CipheredKeyValue
* @next Failure
*/
message CipherKeyValue {
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
optional string key = 2; // key component of key:value
optional bytes value = 3; // value component of key:value
optional bool encrypt = 4; // are we encrypting (True) or decrypting (False)?
optional bool ask_on_encrypt = 5; // should we ask on encrypt operation?
optional bool ask_on_decrypt = 6; // should we ask on decrypt operation?
optional bytes iv = 7; // initialization vector (will be computed if not set)
}
/**
* Response: Return ciphered/deciphered value
* @prev CipherKeyValue
*/
message CipheredKeyValue {
optional bytes value = 1; // ciphered/deciphered value
}
//////////////////////////////////
// Transaction signing messages //
//////////////////////////////////
/**
* Request: Estimated size of the transaction
 * This behaves exactly like SignTx, which means that it can ask for data using TxRequest
 * This call is non-blocking (except for a possible PassphraseRequest to unlock the seed)
* @next TxSize
* @next Failure
*/
message EstimateTxSize {
required uint32 outputs_count = 1; // number of transaction outputs
required uint32 inputs_count = 2; // number of transaction inputs
optional string coin_name = 3 [default='Bitcoin']; // coin to use
}
/**
* Response: Estimated size of the transaction
* @prev EstimateTxSize
*/
message TxSize {
optional uint32 tx_size = 1; // estimated size of transaction in bytes
}
/**
* Request: Ask device to sign transaction
* @next PassphraseRequest
* @next PinMatrixRequest
* @next TxRequest
* @next Failure
*/
message SignTx {
required uint32 outputs_count = 1; // number of transaction outputs
required uint32 inputs_count = 2; // number of transaction inputs
optional string coin_name = 3 [default='Bitcoin']; // coin to use
optional uint32 version = 4 [default=1]; // transaction version
optional uint32 lock_time = 5 [default=0]; // transaction lock_time
}
/**
* Request: Simplified transaction signing
 * This method doesn't support streaming, so there are hardware limits on the number of inputs and outputs.
* In case of success, the result is returned using TxRequest message.
* @next PassphraseRequest
* @next PinMatrixRequest
* @next TxRequest
* @next Failure
*/
message SimpleSignTx {
repeated TxInputType inputs = 1; // transaction inputs
repeated TxOutputType outputs = 2; // transaction outputs
repeated TransactionType transactions = 3; // transactions whose outputs are used to build current inputs
optional string coin_name = 4 [default='Bitcoin']; // coin to use
optional uint32 version = 5 [default=1]; // transaction version
optional uint32 lock_time = 6 [default=0]; // transaction lock_time
}
/**
* Response: Device asks for information for signing transaction or returns the last result
* If request_index is set, device awaits TxAck message (with fields filled in according to request_type)
* If signature_index is set, 'signature' contains signed input of signature_index's input
* @prev SignTx
* @prev SimpleSignTx
* @prev TxAck
*/
message TxRequest {
optional RequestType request_type = 1; // what should be filled in TxAck message?
optional TxRequestDetailsType details = 2; // request for tx details
optional TxRequestSerializedType serialized = 3; // serialized data and request for next
}
/**
* Request: Reported transaction data
* @prev TxRequest
* @next TxRequest
*/
message TxAck {
optional TransactionType tx = 1;
}
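/**
 * Editor's sketch (illustrative, derived from the comments above; not part
 * of the original spec): signing a 1-input/1-output transaction roughly
 * alternates as
 *   host   -> SignTx    (inputs_count = 1, outputs_count = 1)
 *   device -> TxRequest (request_type = TXINPUT,  details.request_index = 0)
 *   host   -> TxAck     (tx.inputs[0] filled in)
 *   device -> TxRequest (request_type = TXOUTPUT, details.request_index = 0)
 *   host   -> TxAck     (tx.outputs[0] filled in)
 *   ...                 (device may also request TXMETA/TXINPUT of previous
 *                        transactions, identified by details.tx_hash)
 *   device -> TxRequest (request_type = TXFINISHED, with serialized data
 *                        returned along the way in TxRequestSerializedType)
 */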
/**
* Request: Ask device to sign transaction
* All fields are optional from the protocol's point of view. Each field defaults to value `0` if missing.
 * Note: at most the first 1024 bytes of data MUST be transmitted as part of this message.
* @next PassphraseRequest
* @next PinMatrixRequest
* @next EthereumTxRequest
* @next Failure
*/
message EthereumSignTx {
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
optional bytes nonce = 2; // <=256 bit unsigned big endian
optional bytes gas_price = 3; // <=256 bit unsigned big endian (in wei)
optional bytes gas_limit = 4; // <=256 bit unsigned big endian
optional bytes to = 5; // 160 bit address hash
optional bytes value = 6; // <=256 bit unsigned big endian (in wei)
optional bytes data_initial_chunk = 7; // The initial data chunk (<= 1024 bytes)
optional uint32 data_length = 8; // Length of transaction payload
optional uint32 chain_id = 9; // Chain Id for EIP 155
}
/**
* Response: Device asks for more data from transaction payload, or returns the signature.
* If data_length is set, device awaits that many more bytes of payload.
* Otherwise, the signature_* fields contain the computed transaction signature. All three fields will be present.
* @prev EthereumSignTx
* @next EthereumTxAck
*/
message EthereumTxRequest {
optional uint32 data_length = 1; // Number of bytes being requested (<= 1024)
optional uint32 signature_v = 2; // Computed signature (recovery parameter, limited to 27 or 28)
optional bytes signature_r = 3; // Computed signature R component (256 bit)
optional bytes signature_s = 4; // Computed signature S component (256 bit)
}
/**
* Request: Transaction payload data.
* @prev EthereumTxRequest
* @next EthereumTxRequest
*/
message EthereumTxAck {
optional bytes data_chunk = 1; // Bytes from transaction payload (<= 1024 bytes)
}
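/**
 * Editor's sketch of the resulting message flow (derived from the field
 * comments above; the byte counts are illustrative): for a transaction
 * carrying 3000 bytes of payload data:
 *   host   -> EthereumSignTx    (data_initial_chunk = bytes 0..1023, data_length = 3000)
 *   device -> EthereumTxRequest (data_length = 1024)  // asks for bytes 1024..2047
 *   host   -> EthereumTxAck     (data_chunk = bytes 1024..2047)
 *   device -> EthereumTxRequest (data_length = 952)   // asks for the remaining bytes
 *   host   -> EthereumTxAck     (data_chunk = bytes 2048..2999)
 *   device -> EthereumTxRequest (signature_v/r/s set, data_length unset)
 */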
////////////////////////////////////////
// Ethereum: Message signing messages //
////////////////////////////////////////
/**
* Request: Ask device to sign message
* @next EthereumMessageSignature
* @next Failure
*/
message EthereumSignMessage {
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
required bytes message = 2; // message to be signed
}
/**
* Request: Ask device to verify message
* @next Success
* @next Failure
*/
message EthereumVerifyMessage {
optional bytes address = 1; // address to verify
optional bytes signature = 2; // signature to verify
optional bytes message = 3; // message to verify
}
/**
* Response: Signed message
* @prev EthereumSignMessage
*/
message EthereumMessageSignature {
optional bytes address = 1; // address used to sign the message
optional bytes signature = 2; // signature of the message
}
///////////////////////
// Identity messages //
///////////////////////
/**
* Request: Ask device to sign identity
* @next SignedIdentity
* @next Failure
*/
message SignIdentity {
optional IdentityType identity = 1; // identity
optional bytes challenge_hidden = 2; // non-visible challenge
optional string challenge_visual = 3; // challenge shown on display (e.g. date+time)
optional string ecdsa_curve_name = 4; // ECDSA curve name to use
}
/**
* Response: Device provides signed identity
* @prev SignIdentity
*/
message SignedIdentity {
optional string address = 1; // identity address
optional bytes public_key = 2; // identity public key
optional bytes signature = 3; // signature of the identity data
}
///////////////////
// ECDH messages //
///////////////////
/**
* Request: Ask device to generate ECDH session key
* @next ECDHSessionKey
* @next Failure
*/
message GetECDHSessionKey {
optional IdentityType identity = 1; // identity
optional bytes peer_public_key = 2; // peer's public key
optional string ecdsa_curve_name = 3; // ECDSA curve name to use
}
/**
* Response: Device provides ECDH session key
* @prev GetECDHSessionKey
*/
message ECDHSessionKey {
optional bytes session_key = 1; // ECDH session key
}
///////////////////
// U2F messages //
///////////////////
/**
* Request: Set U2F counter
* @next Success
*/
message SetU2FCounter {
optional uint32 u2f_counter = 1; // counter
}
/////////////////////////
// Bootloader messages //
/////////////////////////
/**
* Request: Ask device to erase its firmware (so it can be replaced via FirmwareUpload)
* @next Success
* @next FirmwareRequest
* @next Failure
*/
message FirmwareErase {
optional uint32 length = 1; // length of new firmware
}
/**
* Response: Ask for firmware chunk
* @next FirmwareUpload
*/
message FirmwareRequest {
optional uint32 offset = 1; // offset of requested firmware chunk
optional uint32 length = 2; // length of requested firmware chunk
}
/**
* Request: Send firmware in binary form to the device
* @next Success
* @next Failure
*/
message FirmwareUpload {
required bytes payload = 1; // firmware to be loaded into device
optional bytes hash = 2; // hash of the payload
}
/**
* Request: Perform a device self-test
* @next Success
* @next Failure
*/
message SelfTest {
optional bytes payload = 1; // payload to be used in self-test
}
/////////////////////////////////////////////////////////////
// Debug messages (only available if DebugLink is enabled) //
/////////////////////////////////////////////////////////////
/**
* Request: "Press" the button on the device
* @next Success
*/
message DebugLinkDecision {
required bool yes_no = 1; // true for "Confirm", false for "Cancel"
}
/**
* Request: Computer asks for device state
* @next DebugLinkState
*/
message DebugLinkGetState {
}
/**
* Response: Device current state
* @prev DebugLinkGetState
*/
message DebugLinkState {
optional bytes layout = 1; // raw buffer of display
optional string pin = 2; // current PIN, blank if PIN is not set/enabled
optional string matrix = 3; // current PIN matrix
optional string mnemonic = 4; // current BIP-39 mnemonic
optional HDNodeType node = 5; // current BIP-32 node
optional bool passphrase_protection = 6; // is node/mnemonic encrypted using passphrase?
optional string reset_word = 7; // word on device display during ResetDevice workflow
optional bytes reset_entropy = 8; // current entropy during ResetDevice workflow
optional string recovery_fake_word = 9; // (fake) word on display during RecoveryDevice workflow
optional uint32 recovery_word_pos = 10; // index of mnemonic word the device is expecting during RecoveryDevice workflow
}
/**
* Request: Ask device to restart
*/
message DebugLinkStop {
}
/**
* Response: Device wants host to log event
*/
message DebugLinkLog {
optional uint32 level = 1;
optional string bucket = 2;
optional string text = 3;
}
/**
* Request: Read memory from device
* @next DebugLinkMemory
*/
message DebugLinkMemoryRead {
optional uint32 address = 1;
optional uint32 length = 2;
}
/**
* Response: Device sends memory back
* @prev DebugLinkMemoryRead
*/
message DebugLinkMemory {
optional bytes memory = 1;
}
/**
* Request: Write memory to device.
* WARNING: Writing to the wrong location can irreparably break the device.
*/
message DebugLinkMemoryWrite {
optional uint32 address = 1;
optional bytes memory = 2;
optional bool flash = 3;
}
/**
* Request: Erase block of flash on device
* WARNING: Writing to the wrong location can irreparably break the device.
*/
message DebugLinkFlashErase {
optional uint32 sector = 1;
}

View File

@ -0,0 +1,46 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// This file contains the implementation for interacting with the Trezor hardware
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
// https://doc.satoshilabs.com/trezor-tech/api-protobuf.html
//go:generate protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor,import_path=trezor:. types.proto messages.proto
// Package trezor contains the wire protocol wrapper in Go.
package trezor
import (
"reflect"
"github.com/golang/protobuf/proto"
)
// Type returns the protocol buffer type number of a specific message. If the
// message is nil, this method panics!
func Type(msg proto.Message) uint16 {
return uint16(MessageType_value["MessageType_"+reflect.TypeOf(msg).Elem().Name()])
}
// Name returns the friendly message type name of a specific protocol buffer
// type number.
func Name(kind uint16) string {
name := MessageType_name[int32(kind)]
if len(name) < 12 {
return name
}
return name[12:] // strip the leading "MessageType_" prefix (12 characters)
}
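A minimal usage sketch of the two helpers above (an editor's addition, not part of the commit; it assumes code living in the same package, with `fmt` added to the import block):
func exampleTypeName() {
	// Map a concrete message to its wire type number, then back to its
	// friendly name; Name strips the "MessageType_" prefix again.
	msg := &Ping{Message: proto.String("ping")}
	kind := Type(msg)
	fmt.Println(kind, Name(kind)) // e.g. "1 Ping"
}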

File diff suppressed because it is too large

View File

@ -0,0 +1,276 @@
// This file originates from the SatoshiLabs Trezor `common` repository at:
// https://github.com/trezor/trezor-common/blob/master/protob/types.proto
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
/**
* Types for TREZOR communication
*
* @author Marek Palatinus <slush@satoshilabs.com>
* @version 1.2
*/
// Sugar for easier handling in Java
option java_package = "com.satoshilabs.trezor.lib.protobuf";
option java_outer_classname = "TrezorType";
import "google/protobuf/descriptor.proto";
/**
* Options for specifying message direction and type of wire (normal/debug)
*/
extend google.protobuf.EnumValueOptions {
optional bool wire_in = 50002; // message can be transmitted via wire from PC to TREZOR
optional bool wire_out = 50003; // message can be transmitted via wire from TREZOR to PC
optional bool wire_debug_in = 50004; // message can be transmitted via debug wire from PC to TREZOR
optional bool wire_debug_out = 50005; // message can be transmitted via debug wire from TREZOR to PC
optional bool wire_tiny = 50006; // message is handled by TREZOR when the USB stack is in tiny mode
optional bool wire_bootloader = 50007; // message is only handled by TREZOR Bootloader
}
/**
* Type of failures returned by Failure message
* @used_in Failure
*/
enum FailureType {
Failure_UnexpectedMessage = 1;
Failure_ButtonExpected = 2;
Failure_DataError = 3;
Failure_ActionCancelled = 4;
Failure_PinExpected = 5;
Failure_PinCancelled = 6;
Failure_PinInvalid = 7;
Failure_InvalidSignature = 8;
Failure_ProcessError = 9;
Failure_NotEnoughFunds = 10;
Failure_NotInitialized = 11;
Failure_FirmwareError = 99;
}
/**
* Type of script which will be used for transaction output
* @used_in TxOutputType
*/
enum OutputScriptType {
PAYTOADDRESS = 0; // used for all addresses (bitcoin, p2sh, witness)
PAYTOSCRIPTHASH = 1; // p2sh address (deprecated; use PAYTOADDRESS)
PAYTOMULTISIG = 2; // only for change output
PAYTOOPRETURN = 3; // op_return
PAYTOWITNESS = 4; // only for change output
PAYTOP2SHWITNESS = 5; // only for change output
}
/**
* Type of script which will be used for transaction output
* @used_in TxInputType
*/
enum InputScriptType {
SPENDADDRESS = 0; // standard p2pkh address
SPENDMULTISIG = 1; // p2sh multisig address
EXTERNAL = 2; // reserved for external inputs (coinjoin)
SPENDWITNESS = 3; // native segwit
SPENDP2SHWITNESS = 4; // segwit over p2sh (backward compatible)
}
/**
* Type of information required by transaction signing process
* @used_in TxRequest
*/
enum RequestType {
TXINPUT = 0;
TXOUTPUT = 1;
TXMETA = 2;
TXFINISHED = 3;
TXEXTRADATA = 4;
}
/**
* Type of button request
* @used_in ButtonRequest
*/
enum ButtonRequestType {
ButtonRequest_Other = 1;
ButtonRequest_FeeOverThreshold = 2;
ButtonRequest_ConfirmOutput = 3;
ButtonRequest_ResetDevice = 4;
ButtonRequest_ConfirmWord = 5;
ButtonRequest_WipeDevice = 6;
ButtonRequest_ProtectCall = 7;
ButtonRequest_SignTx = 8;
ButtonRequest_FirmwareCheck = 9;
ButtonRequest_Address = 10;
ButtonRequest_PublicKey = 11;
}
/**
* Type of PIN request
* @used_in PinMatrixRequest
*/
enum PinMatrixRequestType {
PinMatrixRequestType_Current = 1;
PinMatrixRequestType_NewFirst = 2;
PinMatrixRequestType_NewSecond = 3;
}
/**
 * Type of recovery procedure. These should be used as a bitmask, e.g.,
* `RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix`
* listing every method supported by the host computer.
*
* Note that ScrambledWords must be supported by every implementation
* for backward compatibility; there is no way to not support it.
*
* @used_in RecoveryDevice
*/
enum RecoveryDeviceType {
// use powers of two when extending this field
RecoveryDeviceType_ScrambledWords = 0; // words in scrambled order
RecoveryDeviceType_Matrix = 1; // matrix recovery type
}
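/**
 * Editor's note (illustration): because ScrambledWords has the value 0 it
 * contributes no bits to the mask, so a host supporting both methods sends
 * RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix == 1; this
 * is also why ScrambledWords support cannot be switched off.
 */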
/**
* Type of Recovery Word request
* @used_in WordRequest
*/
enum WordRequestType {
WordRequestType_Plain = 0;
WordRequestType_Matrix9 = 1;
WordRequestType_Matrix6 = 2;
}
/**
* Structure representing BIP32 (hierarchical deterministic) node
 * Used for importing a private key into the device and exporting a public key out of it
* @used_in PublicKey
* @used_in LoadDevice
* @used_in DebugLinkState
* @used_in Storage
*/
message HDNodeType {
required uint32 depth = 1;
required uint32 fingerprint = 2;
required uint32 child_num = 3;
required bytes chain_code = 4;
optional bytes private_key = 5;
optional bytes public_key = 6;
}
message HDNodePathType {
required HDNodeType node = 1; // BIP-32 node in deserialized form
repeated uint32 address_n = 2; // BIP-32 path to derive the key from node
}
/**
* Structure representing Coin
* @used_in Features
*/
message CoinType {
optional string coin_name = 1;
optional string coin_shortcut = 2;
optional uint32 address_type = 3 [default=0];
optional uint64 maxfee_kb = 4;
optional uint32 address_type_p2sh = 5 [default=5];
optional string signed_message_header = 8;
optional uint32 xpub_magic = 9 [default=76067358]; // default=0x0488b21e
optional uint32 xprv_magic = 10 [default=76066276]; // default=0x0488ade4
optional bool segwit = 11;
optional uint32 forkid = 12;
}
/**
* Type of redeem script used in input
* @used_in TxInputType
*/
message MultisigRedeemScriptType {
repeated HDNodePathType pubkeys = 1; // pubkeys from multisig address (sorted lexicographically)
repeated bytes signatures = 2; // existing signatures for partially signed input
optional uint32 m = 3; // "m" from n, how many valid signatures are necessary for spending
}
/**
* Structure representing transaction input
* @used_in SimpleSignTx
* @used_in TransactionType
*/
message TxInputType {
repeated uint32 address_n = 1; // BIP-32 path to derive the key from master node
required bytes prev_hash = 2; // hash of previous transaction output to spend by this input
required uint32 prev_index = 3; // index of previous output to spend
optional bytes script_sig = 4; // script signature, unset for tx to sign
optional uint32 sequence = 5 [default=4294967295]; // sequence (default=0xffffffff)
optional InputScriptType script_type = 6 [default=SPENDADDRESS]; // defines template of input script
optional MultisigRedeemScriptType multisig = 7; // Filled if input is going to spend multisig tx
optional uint64 amount = 8; // amount of previous transaction output (for segwit only)
}
/**
* Structure representing transaction output
* @used_in SimpleSignTx
* @used_in TransactionType
*/
message TxOutputType {
optional string address = 1; // target coin address in Base58 encoding
repeated uint32 address_n = 2; // BIP-32 path to derive the key from master node; has higher priority than "address"
required uint64 amount = 3; // amount to spend in satoshis
required OutputScriptType script_type = 4; // output script type
optional MultisigRedeemScriptType multisig = 5; // defines multisig address; script_type must be PAYTOMULTISIG
optional bytes op_return_data = 6; // defines op_return data; script_type must be PAYTOOPRETURN, amount must be 0
}
/**
* Structure representing compiled transaction output
* @used_in TransactionType
*/
message TxOutputBinType {
required uint64 amount = 1;
required bytes script_pubkey = 2;
}
/**
* Structure representing transaction
* @used_in SimpleSignTx
*/
message TransactionType {
optional uint32 version = 1;
repeated TxInputType inputs = 2;
repeated TxOutputBinType bin_outputs = 3;
repeated TxOutputType outputs = 5;
optional uint32 lock_time = 4;
optional uint32 inputs_cnt = 6;
optional uint32 outputs_cnt = 7;
optional bytes extra_data = 8;
optional uint32 extra_data_len = 9;
}
/**
* Structure representing request details
* @used_in TxRequest
*/
message TxRequestDetailsType {
optional uint32 request_index = 1; // device expects TxAck message from the computer
optional bytes tx_hash = 2; // tx_hash of requested transaction
optional uint32 extra_data_len = 3; // length of requested extra data
optional uint32 extra_data_offset = 4; // offset of requested extra data
}
/**
* Structure representing serialized data
* @used_in TxRequest
*/
message TxRequestSerializedType {
optional uint32 signature_index = 1; // 'signature' field contains signed input of this index
optional bytes signature = 2; // signature of the signature_index input
optional bytes serialized_tx = 3; // part of serialized and signed transaction
}
/**
* Structure representing identity data
* @used_in IdentityType
*/
message IdentityType {
optional string proto = 1; // proto part of URI
optional string user = 2; // user part of URI
optional string host = 3; // host part of URI
optional string port = 4; // port part of URI
optional string path = 5; // path part of URI
optional uint32 index = 6 [default=0]; // identity index
}
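/**
 * Editor's example (an assumption for illustration): the identity URI
 *     https://satoshi@bitcoin.org/login
 * decomposes into proto = "https", user = "satoshi", host = "bitcoin.org",
 * path = "/login", with port left empty and index at its default of 0.
 */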

View File

@ -0,0 +1,464 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// This file contains the implementation for interacting with the Ledger hardware
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
package usbwallet
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
// ledgerOpcode is an enumeration encoding the supported Ledger opcodes.
type ledgerOpcode byte
// ledgerParam1 is an enumeration encoding the supported Ledger parameters for
// specific opcodes. The same parameter values may be reused between opcodes.
type ledgerParam1 byte
// ledgerParam2 is an enumeration encoding the supported Ledger parameters for
// specific opcodes. The same parameter values may be reused between opcodes.
type ledgerParam2 byte
const (
ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path
ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters
ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration
ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Return the chain code along with the address
)
// errLedgerReplyInvalidHeader is the error message returned by a Ledger data exchange
// if the device replies with a mismatching header. This usually means the device
// is in browser mode.
var errLedgerReplyInvalidHeader = errors.New("ledger: invalid reply header")
// errLedgerInvalidVersionReply is the error message returned by a Ledger version retrieval
// when a response does arrive, but it does not contain the expected data.
var errLedgerInvalidVersionReply = errors.New("ledger: invalid version reply")
// ledgerDriver implements the communication with a Ledger hardware wallet.
type ledgerDriver struct {
device io.ReadWriter // USB device connection to communicate through
version [3]byte // Current version of the Ledger firmware (zero if app is offline)
browser bool // Flag whether the Ledger is in browser mode (reply channel mismatch)
failure error // Any failure that would make the device unusable
log log.Logger // Contextual logger to tag the ledger with its id
}
// newLedgerDriver creates a new instance of a Ledger USB protocol driver.
func newLedgerDriver(logger log.Logger) driver {
return &ledgerDriver{
log: logger,
}
}
// Status implements usbwallet.driver, returning various states the Ledger can
// currently be in.
func (w *ledgerDriver) Status() (string, error) {
if w.failure != nil {
return fmt.Sprintf("Failed: %v", w.failure), w.failure
}
if w.browser {
return "Ethereum app in browser mode", w.failure
}
if w.offline() {
return "Ethereum app offline", w.failure
}
return fmt.Sprintf("Ethereum app v%d.%d.%d online", w.version[0], w.version[1], w.version[2]), w.failure
}
// offline returns whether the wallet and the Ethereum app are offline or not.
//
// The method assumes that the state lock is held!
func (w *ledgerDriver) offline() bool {
return w.version == [3]byte{0, 0, 0}
}
// Open implements usbwallet.driver, attempting to initialize the connection to the
// Ledger hardware wallet. The Ledger does not require a user passphrase, so that
// parameter is silently discarded.
func (w *ledgerDriver) Open(device io.ReadWriter, passphrase string) error {
w.device, w.failure = device, nil
_, err := w.ledgerDerive(accounts.DefaultBaseDerivationPath)
if err != nil {
// Ethereum app is not running or in browser mode, nothing more to do, return
if err == errLedgerReplyInvalidHeader {
w.browser = true
}
return nil
}
// Try to resolve the Ethereum app's version, will fail prior to v1.0.2
if w.version, err = w.ledgerVersion(); err != nil {
w.version = [3]byte{1, 0, 0} // Assume worst case, can't verify if v1.0.0 or v1.0.1
}
return nil
}
// Close implements usbwallet.driver, cleaning up any metadata maintained within
// the Ledger driver.
func (w *ledgerDriver) Close() error {
w.browser, w.version = false, [3]byte{}
return nil
}
// Heartbeat implements usbwallet.driver, performing a sanity check against the
// Ledger to see if it's still online.
func (w *ledgerDriver) Heartbeat() error {
if _, err := w.ledgerVersion(); err != nil && err != errLedgerInvalidVersionReply {
w.failure = err
return err
}
return nil
}
// Derive implements usbwallet.driver, sending a derivation request to the Ledger
// and returning the Ethereum address located on that derivation path.
func (w *ledgerDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
return w.ledgerDerive(path)
}
// SignTx implements usbwallet.driver, sending the transaction to the Ledger and
// waiting for the user to confirm or deny the transaction.
//
// Note, if the version of the Ethereum application running on the Ledger wallet is
// too old to sign EIP-155 transactions, but one is requested nonetheless, an error
// will be returned as opposed to silently signing in Homestead mode.
func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
// If the Ethereum app doesn't run, abort
if w.offline() {
return common.Address{}, nil, accounts.ErrWalletClosed
}
// Ensure the wallet is capable of signing the given transaction
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
}
// All info gathered and metadata checks out, request signing
return w.ledgerSign(path, tx, chainID)
}
// ledgerVersion retrieves the current version of the Ethereum wallet app running
// on the Ledger wallet.
//
// The version retrieval protocol is defined as follows:
//
// CLA | INS | P1 | P2 | Lc | Le
// ----+-----+----+----+----+---
// E0 | 06 | 00 | 00 | 00 | 04
//
// With no input data, and the output data being:
//
// Description | Length
// ---------------------------------------------------+--------
// Flags 01: arbitrary data signature enabled by user | 1 byte
// Application major version | 1 byte
// Application minor version | 1 byte
// Application patch version | 1 byte
func (w *ledgerDriver) ledgerVersion() ([3]byte, error) {
// Send the request and wait for the response
reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
if err != nil {
return [3]byte{}, err
}
if len(reply) != 4 {
return [3]byte{}, errLedgerInvalidVersionReply
}
// Cache the version for future reference
var version [3]byte
copy(version[:], reply[1:])
return version, nil
}
// ledgerDerive retrieves the currently active Ethereum address from a Ledger
// wallet at the specified derivation path.
//
// The address derivation protocol is defined as follows:
//
// CLA | INS | P1                                                | P2                               | Lc  | Le
// ----+-----+---------------------------------------------------+----------------------------------+-----+----
//  E0 | 02  | 00: return address                                | 00: do not return the chain code | var | 00
//     |     | 01: display address and confirm before returning  | 01: return the chain code        |     |
//
// Where the input data is:
//
// Description | Length
// -------------------------------------------------+--------
// Number of BIP 32 derivations to perform (max 10) | 1 byte
// First derivation index (big endian) | 4 bytes
// ... | 4 bytes
// Last derivation index (big endian) | 4 bytes
//
// And the output data is:
//
// Description | Length
// ------------------------+-------------------
// Public Key length | 1 byte
// Uncompressed Public Key | arbitrary
// Ethereum address length | 1 byte
// Ethereum address | 40 bytes hex ascii
// Chain code if requested | 32 bytes
func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, error) {
// Flatten the derivation path into the Ledger request
path := make([]byte, 1+4*len(derivationPath))
path[0] = byte(len(derivationPath))
for i, component := range derivationPath {
binary.BigEndian.PutUint32(path[1+4*i:], component)
}
// Send the request and wait for the response
reply, err := w.ledgerExchange(ledgerOpRetrieveAddress, ledgerP1DirectlyFetchAddress, ledgerP2DiscardAddressChainCode, path)
if err != nil {
return common.Address{}, err
}
// Discard the public key, we don't need that for now
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
return common.Address{}, errors.New("reply lacks public key entry")
}
reply = reply[1+int(reply[0]):]
// Extract the Ethereum hex address string
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
return common.Address{}, errors.New("reply lacks address entry")
}
hexstr := reply[1 : 1+int(reply[0])]
// Decode the hex string into an Ethereum address and return
var address common.Address
hex.Decode(address[:], hexstr)
return address, nil
}
// ledgerSign sends the transaction to the Ledger wallet, and waits for the user
// to confirm or deny the transaction.
//
// The transaction signing protocol is defined as follows:
//
// CLA | INS | P1                                     | P2 | Lc       | Le
// ----+-----+----------------------------------------+----+----------+----------
//  E0 | 04  | 00: first transaction data block       | 00 | variable | variable
//     |     | 80: subsequent transaction data block  |    |          |
//
// Where the input for the first transaction block (first 255 bytes) is:
//
// Description | Length
// -------------------------------------------------+----------
// Number of BIP 32 derivations to perform (max 10) | 1 byte
// First derivation index (big endian) | 4 bytes
// ... | 4 bytes
// Last derivation index (big endian) | 4 bytes
// RLP transaction chunk | arbitrary
//
// And the input for subsequent transaction blocks (first 255 bytes) are:
//
// Description | Length
// ----------------------+----------
// RLP transaction chunk | arbitrary
//
// And the output data is:
//
// Description | Length
// ------------+---------
// signature V | 1 byte
// signature R | 32 bytes
// signature S | 32 bytes
func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
// Flatten the derivation path into the Ledger request
path := make([]byte, 1+4*len(derivationPath))
path[0] = byte(len(derivationPath))
for i, component := range derivationPath {
binary.BigEndian.PutUint32(path[1+4*i:], component)
}
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
var (
txrlp []byte
err error
)
if chainID == nil {
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data()}); err != nil {
return common.Address{}, nil, err
}
} else {
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
return common.Address{}, nil, err
}
}
payload := append(path, txrlp...)
// Send the request and wait for the response
var (
op = ledgerP1InitTransactionData
reply []byte
)
for len(payload) > 0 {
// Calculate the size of the next data chunk
chunk := 255
if chunk > len(payload) {
chunk = len(payload)
}
// Send the chunk over, ensuring it's processed correctly
reply, err = w.ledgerExchange(ledgerOpSignTransaction, op, 0, payload[:chunk])
if err != nil {
return common.Address{}, nil, err
}
// Shift the payload and ensure subsequent chunks are marked as such
payload = payload[chunk:]
op = ledgerP1ContTransactionData
}
// Extract the Ethereum signature and do a sanity validation
if len(reply) != 65 {
return common.Address{}, nil, errors.New("reply lacks signature")
}
signature := append(reply[1:], reply[0])
// Create the correct signer and signature transform based on the chain ID
var signer types.Signer
if chainID == nil {
signer = new(types.HomesteadSigner)
} else {
signer = types.NewEIP155Signer(chainID)
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
}
signed, err := tx.WithSignature(signer, signature)
if err != nil {
return common.Address{}, nil, err
}
sender, err := types.Sender(signer, signed)
if err != nil {
return common.Address{}, nil, err
}
return sender, signed, nil
}
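// Editor's note (worked example, not in the upstream source): for an EIP-155
// transaction on chain ID 1 the device returns v = chainID*2 + 35 + recid,
// i.e. 37 or 38; subtracting byte(1*2+35) = 37 above leaves the raw recovery
// id 0 or 1 that the EIP155 signer expects in signature[64].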
// ledgerExchange performs a data exchange with the Ledger wallet, sending it a
// message and retrieving the response.
//
// The common transport header is defined as follows:
//
// Description | Length
// --------------------------------------+----------
// Communication channel ID (big endian) | 2 bytes
// Command tag | 1 byte
// Packet sequence index (big endian) | 2 bytes
// Payload | arbitrary
//
// The Communication channel ID allows multiplexing commands over the same
// physical link. It is not used for the time being, and should be set to 0101
// to avoid compatibility issues with implementations ignoring a leading 00 byte.
//
// The Command tag describes the message content. Use TAG_APDU (0x05) for standard
// APDU payloads, or TAG_PING (0x02) for a simple link test.
//
// The Packet sequence index describes the current sequence for fragmented payloads.
// The first fragment index is 0x00.
//
// APDU Command payloads are encoded as follows:
//
// Description | Length
// -----------------------------------
// APDU length (big endian) | 2 bytes
// APDU CLA | 1 byte
// APDU INS | 1 byte
// APDU P1 | 1 byte
// APDU P2 | 1 byte
// APDU length | 1 byte
// Optional APDU data | arbitrary
func (w *ledgerDriver) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
// Construct the message payload, possibly split into multiple chunks
apdu := make([]byte, 2, 7+len(data))
binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
apdu = append(apdu, []byte{0xe0, byte(opcode), byte(p1), byte(p2), byte(len(data))}...)
apdu = append(apdu, data...)
// Stream all the chunks to the device
header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // Channel ID and command tag appended
chunk := make([]byte, 64)
space := len(chunk) - len(header)
for i := 0; len(apdu) > 0; i++ {
// Construct the new message to stream
chunk = append(chunk[:0], header...)
binary.BigEndian.PutUint16(chunk[3:], uint16(i))
if len(apdu) > space {
chunk = append(chunk, apdu[:space]...)
apdu = apdu[space:]
} else {
chunk = append(chunk, apdu...)
apdu = nil
}
// Send over to the device
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
if _, err := w.device.Write(chunk); err != nil {
return nil, err
}
}
// Stream the reply back from the wallet in 64 byte chunks
var reply []byte
chunk = chunk[:64] // Yeah, we surely have enough space
for {
// Read the next chunk from the Ledger wallet
if _, err := io.ReadFull(w.device, chunk); err != nil {
return nil, err
}
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
// Make sure the transport header matches
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
return nil, errLedgerReplyInvalidHeader
}
// If it's the first chunk, retrieve the total message length
var payload []byte
if chunk[3] == 0x00 && chunk[4] == 0x00 {
reply = make([]byte, 0, int(binary.BigEndian.Uint16(chunk[5:7])))
payload = chunk[7:]
} else {
payload = chunk[5:]
}
// Append to the reply and stop when filled up
if left := cap(reply) - len(reply); left > len(payload) {
reply = append(reply, payload...)
} else {
reply = append(reply, payload[:left]...)
break
}
}
return reply[:len(reply)-2], nil
}
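// Editor's worked example (derived from the framing above, not in the
// upstream source): a GetConfiguration call carries no data, so the APDU is
// two length bytes plus five header bytes, seven bytes in total:
//
//	00 05 e0 06 00 00 00
//
// Prefixed with the 5-byte transport header 01 01 05 00 00 it fits in a
// single 64-byte HID chunk. Each chunk carries 64-5 = 59 payload bytes, so
// any APDU longer than 59 bytes is split across chunks, distinguished by the
// incrementing big-endian sequence index in header bytes 3..4.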

View File

@ -1,903 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// This file contains the implementation for interacting with the Ledger hardware
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
package usbwallet
import (
"context"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math/big"
"sync"
"time"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/karalabe/hid"
)
// Maximum time between wallet health checks to detect USB unplugs.
const ledgerHeartbeatCycle = time.Second
// Minimum time to wait between self-derivation attempts, even if the user is
// requesting accounts like crazy.
const ledgerSelfDeriveThrottling = time.Second
// ledgerOpcode is an enumeration encoding the supported Ledger opcodes.
type ledgerOpcode byte
// ledgerParam1 is an enumeration encoding the supported Ledger parameters for
// specific opcodes. The same parameter values may be reused between opcodes.
type ledgerParam1 byte
// ledgerParam2 is an enumeration encoding the supported Ledger parameters for
// specific opcodes. The same parameter values may be reused between opcodes.
type ledgerParam2 byte
const (
ledgerOpRetrieveAddress ledgerOpcode = 0x02 // Returns the public key and Ethereum address for a given BIP 32 path
ledgerOpSignTransaction ledgerOpcode = 0x04 // Signs an Ethereum transaction after having the user validate the parameters
ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration
ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Return the chain code along with the address
)
// errReplyInvalidHeader is the error message returned by a Ledger data exchange
// if the device replies with a mismatching header. This usually means the device
// is in browser mode.
var errReplyInvalidHeader = errors.New("invalid reply header")
// errInvalidVersionReply is the error message returned by a Ledger version retrieval
// when a response does arrive, but it does not contain the expected data.
var errInvalidVersionReply = errors.New("invalid version reply")
// ledgerWallet represents a live USB Ledger hardware wallet.
type ledgerWallet struct {
hub *LedgerHub // USB hub the device originates from (TODO(karalabe): remove if hotplug lands on Windows)
url *accounts.URL // Textual URL uniquely identifying this wallet
info hid.DeviceInfo // Known USB device infos about the wallet
device *hid.Device // USB device advertising itself as a Ledger wallet
failure error // Any failure that would make the device unusable
version [3]byte // Current version of the Ledger Ethereum app (zero if app is offline)
browser bool // Flag whether the Ledger is in browser mode (reply channel mismatch)
accounts []accounts.Account // List of derived accounts pinned on the Ledger
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations
deriveNextPath accounts.DerivationPath // Next derivation path for account auto-discovery
deriveNextAddr common.Address // Next derived account address for auto-discovery
deriveChain ethereum.ChainStateReader // Blockchain state reader to discover used accounts with
deriveReq chan chan struct{} // Channel to request a self-derivation on
deriveQuit chan chan error // Channel to terminate the self-deriver with
healthQuit chan chan error
// Locking a hardware wallet is a bit special. Since hardware devices are lower
// performing, any communication with them might take a non negligible amount of
// time. Worse still, waiting for user confirmation can take arbitrarily long,
// but exclusive communication must be upheld throughout. Locking the entire wallet
// in the meantime, however, would stall any parts of the system that don't want
// to communicate, but just read some state (e.g. list the accounts).
//
// As such, a hardware wallet needs two locks to function correctly. A state
// lock can be used to protect the wallet's software-side internal state, which
// must not be held exclusively during hardware communication. A communication
// lock can be used to achieve exclusive access to the device itself, this one
// however should allow "skipping" waiting for operations that might want to
// use the device, but can live without too (e.g. account self-derivation).
//
// Since we have two locks, it's important to know how to properly use them:
// - Communication requires the `device` to not change, so obtaining the
// commsLock should be done after having a stateLock.
// - Communication must not disable read access to the wallet state, so it
// must only ever hold a *read* lock to stateLock.
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
log log.Logger // Contextual logger to tag the ledger with its id
}
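// Editor's sketch (an illustration mirroring the pattern heartbeat and
// selfDerive use below, not upstream code): the canonical acquisition order
// for talking to the device is
//
//	w.stateLock.RLock()           // pin the device pointer, keep state readable
//	if w.device == nil { ... }    // wallet may have been closed meanwhile
//	<-w.commsLock                 // take exclusive ownership of the USB link
//	// ... exchange data with the device ...
//	w.commsLock <- struct{}{}     // release the link
//	w.stateLock.RUnlock()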
// URL implements accounts.Wallet, returning the URL of the Ledger device.
func (w *ledgerWallet) URL() accounts.URL {
return *w.url // Immutable, no need for a lock
}
// Status implements accounts.Wallet, returning whether the Ledger is opened, closed,
// or whether the Ethereum app was not started on it.
func (w *ledgerWallet) Status() string {
w.stateLock.RLock() // No device communication, state lock is enough
defer w.stateLock.RUnlock()
if w.failure != nil {
return fmt.Sprintf("Failed: %v", w.failure)
}
if w.device == nil {
return "Closed"
}
if w.browser {
return "Ethereum app in browser mode"
}
if w.offline() {
return "Ethereum app offline"
}
return fmt.Sprintf("Ethereum app v%d.%d.%d online", w.version[0], w.version[1], w.version[2])
}
// offline returns whether the wallet and the Ethereum app are offline or not.
//
// The method assumes that the state lock is held!
func (w *ledgerWallet) offline() bool {
return w.version == [3]byte{0, 0, 0}
}
// failed returns whether the USB device wrapped by the wallet failed for some reason.
// This is used by the device scanner to report failed wallets as departed.
//
// The method assumes that the state lock is *not* held!
func (w *ledgerWallet) failed() bool {
w.stateLock.RLock() // No device communication, state lock is enough
defer w.stateLock.RUnlock()
return w.failure != nil
}
// Open implements accounts.Wallet, attempting to open a USB connection to the
// Ledger hardware wallet. The Ledger does not require a user passphrase, so that
// parameter is silently discarded.
func (w *ledgerWallet) Open(passphrase string) error {
w.stateLock.Lock() // State lock is enough since there's no connection yet at this point
defer w.stateLock.Unlock()
// If the wallet was already opened, don't try to open again
if w.device != nil {
return accounts.ErrWalletAlreadyOpen
}
// Otherwise iterate over all USB devices and find this again (no way to directly do this)
device, err := w.info.Open()
if err != nil {
return err
}
// Wallet seems to be successfully opened, guess if the Ethereum app is running
w.device = device
w.commsLock = make(chan struct{}, 1)
w.commsLock <- struct{}{} // Enable lock
w.paths = make(map[common.Address]accounts.DerivationPath)
w.deriveReq = make(chan chan struct{})
w.deriveQuit = make(chan chan error)
w.healthQuit = make(chan chan error)
defer func() {
go w.heartbeat()
go w.selfDerive()
}()
if _, err = w.ledgerDerive(accounts.DefaultBaseDerivationPath); err != nil {
// Ethereum app is not running or in browser mode, nothing more to do, return
if err == errReplyInvalidHeader {
w.browser = true
}
return nil
}
// Try to resolve the Ethereum app's version, will fail prior to v1.0.2
if w.version, err = w.ledgerVersion(); err != nil {
w.version = [3]byte{1, 0, 0} // Assume worst case, can't verify if v1.0.0 or v1.0.1
}
return nil
}
// heartbeat is a health check loop for the Ledger wallets to periodically verify
// whether they are still present or if they malfunctioned. It is needed because:
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
// - communication timeout on the Ledger requires a device power cycle to fix
func (w *ledgerWallet) heartbeat() {
w.log.Debug("Ledger health-check started")
defer w.log.Debug("Ledger health-check stopped")
// Execute heartbeat checks until termination or error
var (
errc chan error
err error
)
for errc == nil && err == nil {
// Wait until termination is requested or the heartbeat cycle arrives
select {
case errc = <-w.healthQuit:
// Termination requested
continue
case <-time.After(ledgerHeartbeatCycle):
// Heartbeat time
}
// Execute a tiny data exchange to see responsiveness
w.stateLock.RLock()
if w.device == nil {
// Terminated while waiting for the lock
w.stateLock.RUnlock()
continue
}
<-w.commsLock // Don't lock state while resolving version
_, err = w.ledgerVersion()
w.commsLock <- struct{}{}
w.stateLock.RUnlock()
if err != nil && err != errInvalidVersionReply {
w.stateLock.Lock() // Lock state to tear the wallet down
w.failure = err
w.close()
w.stateLock.Unlock()
}
// Ignore non-hardware-related errors
err = nil
}
// In case of error, wait for termination
if err != nil {
w.log.Debug("Ledger health-check failed", "err", err)
errc = <-w.healthQuit
}
errc <- err
}
// Close implements accounts.Wallet, closing the USB connection to the Ledger.
func (w *ledgerWallet) Close() error {
// Ensure the wallet was opened
w.stateLock.RLock()
hQuit, dQuit := w.healthQuit, w.deriveQuit
w.stateLock.RUnlock()
// Terminate the health checks
var herr error
if hQuit != nil {
errc := make(chan error)
hQuit <- errc
herr = <-errc // Save for later, we *must* close the USB
}
// Terminate the self-derivations
var derr error
if dQuit != nil {
errc := make(chan error)
dQuit <- errc
derr = <-errc // Save for later, we *must* close the USB
}
// Terminate the device connection
w.stateLock.Lock()
defer w.stateLock.Unlock()
w.healthQuit = nil
w.deriveQuit = nil
w.deriveReq = nil
if err := w.close(); err != nil {
return err
}
if herr != nil {
return herr
}
return derr
}
// close is the internal wallet closer that terminates the USB connection and
// resets all the fields to their defaults.
//
// Note, close assumes the state lock is held!
func (w *ledgerWallet) close() error {
// Allow duplicate closes, especially for health-check failures
if w.device == nil {
return nil
}
// Close the device, clear everything, then return
w.device.Close()
w.device = nil
w.browser, w.version = false, [3]byte{}
w.accounts, w.paths = nil, nil
return nil
}
// Accounts implements accounts.Wallet, returning the list of accounts pinned to
// the Ledger hardware wallet. If self-derivation was enabled, the account list
// is periodically expanded based on current chain state.
func (w *ledgerWallet) Accounts() []accounts.Account {
// Attempt self-derivation if it's running
reqc := make(chan struct{}, 1)
select {
case w.deriveReq <- reqc:
// Self-derivation request accepted, wait for it
<-reqc
default:
// Self-derivation offline, throttled or busy, skip
}
// Return whatever account list we ended up with
w.stateLock.RLock()
defer w.stateLock.RUnlock()
cpy := make([]accounts.Account, len(w.accounts))
copy(cpy, w.accounts)
return cpy
}
// selfDerive is an account derivation loop that upon request attempts to find
// new non-zero accounts.
func (w *ledgerWallet) selfDerive() {
w.log.Debug("Ledger self-derivation started")
defer w.log.Debug("Ledger self-derivation stopped")
// Execute self-derivations until termination or error
var (
reqc chan struct{}
errc chan error
err error
)
for errc == nil && err == nil {
// Wait until either derivation or termination is requested
select {
case errc = <-w.deriveQuit:
// Termination requested
continue
case reqc = <-w.deriveReq:
// Account discovery requested
}
// Derivation needs a chain and device access, skip if either unavailable
w.stateLock.RLock()
if w.device == nil || w.deriveChain == nil || w.offline() {
w.stateLock.RUnlock()
reqc <- struct{}{}
continue
}
select {
case <-w.commsLock:
default:
w.stateLock.RUnlock()
reqc <- struct{}{}
continue
}
// Device lock obtained, derive the next batch of accounts
var (
accs []accounts.Account
paths []accounts.DerivationPath
nextAddr = w.deriveNextAddr
nextPath = w.deriveNextPath
context = context.Background()
)
for empty := false; !empty; {
// Retrieve the next derived Ethereum account
if nextAddr == (common.Address{}) {
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
w.log.Warn("Ledger account derivation failed", "err", err)
break
}
}
// Check the account's status against the current chain state
var (
balance *big.Int
nonce uint64
)
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
if err != nil {
w.log.Warn("Ledger balance retrieval failed", "err", err)
break
}
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
if err != nil {
w.log.Warn("Ledger nonce retrieval failed", "err", err)
break
}
// If the next account is empty, stop self-derivation, but add it nonetheless
if balance.Sign() == 0 && nonce == 0 {
empty = true
}
// We've just self-derived a new account, start tracking it locally
path := make(accounts.DerivationPath, len(nextPath))
copy(path[:], nextPath[:])
paths = append(paths, path)
account := accounts.Account{
Address: nextAddr,
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
}
accs = append(accs, account)
// Display a log message to the user for new (or previously empty) accounts
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
w.log.Info("Ledger discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
}
// Fetch the next potential account
if !empty {
nextAddr = common.Address{}
nextPath[len(nextPath)-1]++
}
}
// Self derivation complete, release device lock
w.commsLock <- struct{}{}
w.stateLock.RUnlock()
// Insert any accounts successfully derived
w.stateLock.Lock()
for i := 0; i < len(accs); i++ {
if _, ok := w.paths[accs[i].Address]; !ok {
w.accounts = append(w.accounts, accs[i])
w.paths[accs[i].Address] = paths[i]
}
}
// Shift the self-derivation forward
// TODO(karalabe): don't overwrite changes from wallet.SelfDerive
w.deriveNextAddr = nextAddr
w.deriveNextPath = nextPath
w.stateLock.Unlock()
// Notify the user of termination and loop after a bit of time (to avoid thrashing)
reqc <- struct{}{}
if err == nil {
select {
case errc = <-w.deriveQuit:
// Termination requested, abort
case <-time.After(ledgerSelfDeriveThrottling):
// Waited enough, willing to self-derive again
}
}
}
// In case of error, wait for termination
if err != nil {
w.log.Debug("Ledger self-derivation failed", "err", err)
errc = <-w.deriveQuit
}
errc <- err
}
// Contains implements accounts.Wallet, returning whether a particular account is
// or is not pinned into this Ledger instance. Although we could attempt to resolve
// unpinned accounts, that would be a non-negligible hardware operation.
func (w *ledgerWallet) Contains(account accounts.Account) bool {
w.stateLock.RLock()
defer w.stateLock.RUnlock()
_, exists := w.paths[account.Address]
return exists
}
// Derive implements accounts.Wallet, deriving a new account at the specific
// derivation path. If pin is set to true, the account will be added to the list
// of tracked accounts.
func (w *ledgerWallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
// Try to derive the actual account and update its URL if successful
w.stateLock.RLock() // Avoid device disappearing during derivation
if w.device == nil || w.offline() {
w.stateLock.RUnlock()
return accounts.Account{}, accounts.ErrWalletClosed
}
<-w.commsLock // Avoid concurrent hardware access
address, err := w.ledgerDerive(path)
w.commsLock <- struct{}{}
w.stateLock.RUnlock()
// If an error occurred or no pinning was requested, return
if err != nil {
return accounts.Account{}, err
}
account := accounts.Account{
Address: address,
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
}
if !pin {
return account, nil
}
// Pinning needs to modify the state
w.stateLock.Lock()
defer w.stateLock.Unlock()
if _, ok := w.paths[address]; !ok {
w.accounts = append(w.accounts, account)
w.paths[address] = path
}
return account, nil
}
// SelfDerive implements accounts.Wallet, trying to discover accounts that the
// user has used previously (based on the chain state), but did not
// explicitly pin to the wallet manually. To avoid chain head monitoring, self
// derivation only runs during account listing (and even then throttled).
func (w *ledgerWallet) SelfDerive(base accounts.DerivationPath, chain ethereum.ChainStateReader) {
w.stateLock.Lock()
defer w.stateLock.Unlock()
w.deriveNextPath = make(accounts.DerivationPath, len(base))
copy(w.deriveNextPath[:], base[:])
w.deriveNextAddr = common.Address{}
w.deriveChain = chain
}
// SignHash implements accounts.Wallet, however signing arbitrary data is not
// supported for Ledger wallets, so this method will always return an error.
func (w *ledgerWallet) SignHash(acc accounts.Account, hash []byte) ([]byte, error) {
return nil, accounts.ErrNotSupported
}
// SignTx implements accounts.Wallet. It sends the transaction over to the Ledger
// wallet to request a confirmation from the user. It returns either the signed
// transaction or a failure if the user denied the transaction.
//
// Note, if the version of the Ethereum application running on the Ledger wallet is
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
// will be returned as opposed to silently signing in Homestead mode.
func (w *ledgerWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
w.stateLock.RLock() // Comms have own mutex, this is for the state fields
defer w.stateLock.RUnlock()
// If the wallet is closed, or the Ethereum app doesn't run, abort
if w.device == nil || w.offline() {
return nil, accounts.ErrWalletClosed
}
// Make sure the requested account is contained within
path, ok := w.paths[account.Address]
if !ok {
return nil, accounts.ErrUnknownAccount
}
// Ensure the wallet is capable of signing the given transaction
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
return nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
}
// All info gathered and metadata checks out, request signing
<-w.commsLock
defer func() { w.commsLock <- struct{}{} }()
// Ensure the device isn't screwed with while user confirmation is pending
// TODO(karalabe): remove if hotplug lands on Windows
w.hub.commsLock.Lock()
w.hub.commsPend++
w.hub.commsLock.Unlock()
defer func() {
w.hub.commsLock.Lock()
w.hub.commsPend--
w.hub.commsLock.Unlock()
}()
return w.ledgerSign(path, account.Address, tx, chainID)
}
// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary
// data is not supported for Ledger wallets, so this method will always return
// an error.
func (w *ledgerWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
return nil, accounts.ErrNotSupported
}
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
// transaction with the given account using passphrase as extra authentication.
// Since the Ledger does not support extra passphrases, it is silently ignored.
func (w *ledgerWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
return w.SignTx(account, tx, chainID)
}
// ledgerVersion retrieves the current version of the Ethereum wallet app running
// on the Ledger wallet.
//
// The version retrieval protocol is defined as follows:
//
// CLA | INS | P1 | P2 | Lc | Le
// ----+-----+----+----+----+---
// E0 | 06 | 00 | 00 | 00 | 04
//
// With no input data, and the output data being:
//
// Description | Length
// ---------------------------------------------------+--------
// Flags 01: arbitrary data signature enabled by user | 1 byte
// Application major version | 1 byte
// Application minor version | 1 byte
// Application patch version | 1 byte
func (w *ledgerWallet) ledgerVersion() ([3]byte, error) {
// Send the request and wait for the response
reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
if err != nil {
return [3]byte{}, err
}
if len(reply) != 4 {
return [3]byte{}, errInvalidVersionReply
}
// Cache the version for future reference
var version [3]byte
copy(version[:], reply[1:])
return version, nil
}
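// As a worked decode of the table above (not part of the original change), a
// reply of 0x01 0x01 0x00 0x02 means arbitrary data signing is enabled (flags
// 0x01) and the running app is v1.0.2.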
// ledgerDerive retrieves the currently active Ethereum address from a Ledger
// wallet at the specified derivation path.
//
// The address derivation protocol is defined as follows:
//
// CLA | INS | P1 | P2 | Lc | Le
// ----+-----+----+----+-----+---
// E0 | 02 | 00 return address
// 01 display address and confirm before returning
// | 00: do not return the chain code
// | 01: return the chain code
// | var | 00
//
// Where the input data is:
//
// Description | Length
// -------------------------------------------------+--------
// Number of BIP 32 derivations to perform (max 10) | 1 byte
// First derivation index (big endian) | 4 bytes
// ... | 4 bytes
// Last derivation index (big endian) | 4 bytes
//
// And the output data is:
//
// Description | Length
// ------------------------+-------------------
// Public Key length | 1 byte
// Uncompressed Public Key | arbitrary
// Ethereum address length | 1 byte
// Ethereum address | 40 bytes hex ascii
// Chain code if requested | 32 bytes
func (w *ledgerWallet) ledgerDerive(derivationPath []uint32) (common.Address, error) {
// Flatten the derivation path into the Ledger request
path := make([]byte, 1+4*len(derivationPath))
path[0] = byte(len(derivationPath))
for i, component := range derivationPath {
binary.BigEndian.PutUint32(path[1+4*i:], component)
}
// Send the request and wait for the response
reply, err := w.ledgerExchange(ledgerOpRetrieveAddress, ledgerP1DirectlyFetchAddress, ledgerP2DiscardAddressChainCode, path)
if err != nil {
return common.Address{}, err
}
// Discard the public key, we don't need that for now
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
return common.Address{}, errors.New("reply lacks public key entry")
}
reply = reply[1+int(reply[0]):]
// Extract the Ethereum hex address string
if len(reply) < 1 || len(reply) < 1+int(reply[0]) {
return common.Address{}, errors.New("reply lacks address entry")
}
hexstr := reply[1 : 1+int(reply[0])]
// Decode the hex string into an Ethereum address and return
var address common.Address
hex.Decode(address[:], hexstr)
return address, nil
}
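// exampleDerivePayload is an illustrative sketch (not part of the original
// change): it builds the same request payload ledgerDerive produces, here for
// the default m/44'/60'/0'/0 base path.
func exampleDerivePayload() []byte {
path := accounts.DefaultBaseDerivationPath
payload := make([]byte, 1+4*len(path))
payload[0] = byte(len(path)) // number of BIP 32 derivation components
for i, component := range path {
binary.BigEndian.PutUint32(payload[1+4*i:], component)
}
return payload // 04 8000002c 8000003c 80000000 00000000
}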
// ledgerSign sends the transaction to the Ledger wallet, and waits for the user
// to confirm or deny the transaction.
//
// The transaction signing protocol is defined as follows:
//
// CLA | INS | P1 | P2 | Lc | Le
// ----+-----+----+----+-----+---
// E0 | 04 | 00: first transaction data block
// 80: subsequent transaction data block
// | 00 | variable | variable
//
// Where the input for the first transaction block (first 255 bytes) is:
//
// Description | Length
// -------------------------------------------------+----------
// Number of BIP 32 derivations to perform (max 10) | 1 byte
// First derivation index (big endian) | 4 bytes
// ... | 4 bytes
// Last derivation index (big endian) | 4 bytes
// RLP transaction chunk | arbitrary
//
// And the input for subsequent transaction blocks (next 255 bytes each) is:
//
// Description | Length
// ----------------------+----------
// RLP transaction chunk | arbitrary
//
// And the output data is:
//
// Description | Length
// ------------+---------
// signature V | 1 byte
// signature R | 32 bytes
// signature S | 32 bytes
func (w *ledgerWallet) ledgerSign(derivationPath []uint32, address common.Address, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Flatten the derivation path into the Ledger request
path := make([]byte, 1+4*len(derivationPath))
path[0] = byte(len(derivationPath))
for i, component := range derivationPath {
binary.BigEndian.PutUint32(path[1+4*i:], component)
}
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
var (
txrlp []byte
err error
)
if chainID == nil {
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data()}); err != nil {
return nil, err
}
} else {
if txrlp, err = rlp.EncodeToBytes([]interface{}{tx.Nonce(), tx.GasPrice(), tx.Gas(), tx.To(), tx.Value(), tx.Data(), chainID, big.NewInt(0), big.NewInt(0)}); err != nil {
return nil, err
}
}
payload := append(path, txrlp...)
// Send the request and wait for the response
var (
op = ledgerP1InitTransactionData
reply []byte
)
for len(payload) > 0 {
// Calculate the size of the next data chunk
chunk := 255
if chunk > len(payload) {
chunk = len(payload)
}
// Send the chunk over, ensuring it's processed correctly
reply, err = w.ledgerExchange(ledgerOpSignTransaction, op, 0, payload[:chunk])
if err != nil {
return nil, err
}
// Shift the payload and ensure subsequent chunks are marked as such
payload = payload[chunk:]
op = ledgerP1ContTransactionData
}
// Extract the Ethereum signature and do a sanity validation
if len(reply) != 65 {
return nil, errors.New("reply lacks signature")
}
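// The device returns the signature as V || R || S (see the table above),
// while the Go signature transforms expect R || S || V, so rotate V to the end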
signature := append(reply[1:], reply[0])
// Create the correct signer and signature transform based on the chain ID
var signer types.Signer
if chainID == nil {
signer = new(types.HomesteadSigner)
} else {
signer = types.NewEIP155Signer(chainID)
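// The Ledger pre-encodes v as chainID*2 + 35 + recovery id for EIP-155
// transactions; strip that offset so only the raw recovery id remains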
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
}
// Inject the final signature into the transaction and sanity check the sender
signed, err := tx.WithSignature(signer, signature)
if err != nil {
return nil, err
}
sender, err := types.Sender(signer, signed)
if err != nil {
return nil, err
}
if sender != address {
return nil, fmt.Errorf("signer mismatch: expected %s, got %s", address.Hex(), sender.Hex())
}
return signed, nil
}
// ledgerExchange performs a data exchange with the Ledger wallet, sending it a
// message and retrieving the response.
//
// The common transport header is defined as follows:
//
// Description | Length
// --------------------------------------+----------
// Communication channel ID (big endian) | 2 bytes
// Command tag | 1 byte
// Packet sequence index (big endian) | 2 bytes
// Payload | arbitrary
//
// The Communication channel ID allows commands multiplexing over the same
// physical link. It is not used for the time being, and should be set to 0101
// to avoid compatibility issues with implementations ignoring a leading 00 byte.
//
// The Command tag describes the message content. Use TAG_APDU (0x05) for standard
// APDU payloads, or TAG_PING (0x02) for a simple link test.
//
// The Packet sequence index describes the current sequence for fragmented payloads.
// The first fragment index is 0x00.
//
// APDU Command payloads are encoded as follows:
//
// Description | Length
// --------------------------+----------
// APDU length (big endian) | 2 bytes
// APDU CLA | 1 byte
// APDU INS | 1 byte
// APDU P1 | 1 byte
// APDU P2 | 1 byte
// APDU length | 1 byte
// Optional APDU data | arbitrary
func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
// Construct the message payload, possibly split into multiple chunks
apdu := make([]byte, 2, 7+len(data))
binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
apdu = append(apdu, []byte{0xe0, byte(opcode), byte(p1), byte(p2), byte(len(data))}...)
apdu = append(apdu, data...)
// Stream all the chunks to the device
header := []byte{0x01, 0x01, 0x05, 0x00, 0x00} // Channel ID, command tag and sequence index placeholder
chunk := make([]byte, 64)
space := len(chunk) - len(header)
for i := 0; len(apdu) > 0; i++ {
// Construct the new message to stream
chunk = append(chunk[:0], header...)
binary.BigEndian.PutUint16(chunk[3:], uint16(i))
if len(apdu) > space {
chunk = append(chunk, apdu[:space]...)
apdu = apdu[space:]
} else {
chunk = append(chunk, apdu...)
apdu = nil
}
// Send over to the device
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
if _, err := w.device.Write(chunk); err != nil {
return nil, err
}
}
// Stream the reply back from the wallet in 64 byte chunks
var reply []byte
chunk = chunk[:64] // Yeah, we surely have enough space
for {
// Read the next chunk from the Ledger wallet
if _, err := io.ReadFull(w.device, chunk); err != nil {
return nil, err
}
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
// Make sure the transport header matches
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
return nil, errReplyInvalidHeader
}
// If it's the first chunk, retrieve the total message length
var payload []byte
if chunk[3] == 0x00 && chunk[4] == 0x00 {
reply = make([]byte, 0, int(binary.BigEndian.Uint16(chunk[5:7])))
payload = chunk[7:]
} else {
payload = chunk[5:]
}
// Append to the reply and stop when filled up
if left := cap(reply) - len(reply); left > len(payload) {
reply = append(reply, payload...)
} else {
reply = append(reply, payload[:left]...)
break
}
}
return reply[:len(reply)-2], nil
}
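// As a worked example (not part of the original change), the version query
// issued by ledgerVersion (INS 0x06, P1 = P2 = 0, no data) is framed into a
// single chunk as:
//
// 01 01 | 05 | 00 00 | 00 05 | e0 06 00 00 00
// chan | tag | seq | APDU len | CLA INS P1 P2 Lc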


@ -0,0 +1,330 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// This file contains the implementation for interacting with the Trezor hardware
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
// https://doc.satoshilabs.com/trezor-tech/api-protobuf.html
package usbwallet
import (
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/usbwallet/internal/trezor"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/golang/protobuf/proto"
)
// ErrTrezorPINNeeded is returned if opening the trezor requires a PIN code. In
// this case, the calling application should display a pinpad and send back the
// encoded passphrase.
var ErrTrezorPINNeeded = errors.New("trezor: pin needed")
// errTrezorReplyInvalidHeader is the error message returned by a Trezor data exchange
// if the device replies with a mismatching header. This usually means the device
// is in browser mode.
var errTrezorReplyInvalidHeader = errors.New("trezor: invalid reply header")
// trezorDriver implements the communication with a Trezor hardware wallet.
type trezorDriver struct {
device io.ReadWriter // USB device connection to communicate through
version [3]uint32 // Current version of the Trezor firmware
label string // Current textual label of the Trezor device
pinwait bool // Flags whether the device is waiting for PIN entry
failure error // Any failure that would make the device unusable
log log.Logger // Contextual logger to tag the trezor with its id
}
// newTrezorDriver creates a new instance of a Trezor USB protocol driver.
func newTrezorDriver(logger log.Logger) driver {
return &trezorDriver{
log: logger,
}
}
// Status implements usbwallet.driver, returning whether the Trezor is opened,
// closed, failed, or still waiting for its PIN to be entered.
func (w *trezorDriver) Status() (string, error) {
if w.failure != nil {
return fmt.Sprintf("Failed: %v", w.failure), w.failure
}
if w.device == nil {
return "Closed", w.failure
}
if w.pinwait {
return fmt.Sprintf("Trezor v%d.%d.%d '%s' waiting for PIN", w.version[0], w.version[1], w.version[2], w.label), w.failure
}
return fmt.Sprintf("Trezor v%d.%d.%d '%s' online", w.version[0], w.version[1], w.version[2], w.label), w.failure
}
// Open implements usbwallet.driver, attempting to initialize the connection to
// the Trezor hardware wallet. Initializing the Trezor is a two-phase operation:
// * The first phase is to initialize the connection and read the wallet's
// features. This phase is invoked if the provided passphrase is empty. The
// device will display the pinpad as a result and will return an appropriate
// error to notify the user that a second open phase is needed.
// * The second phase is to unlock access to the Trezor, which is done by the
// user providing a passphrase that maps the keyboard keypad to the user's
// PIN digits (shuffled according to the pinpad displayed).
func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error {
w.device, w.failure = device, nil
// If phase 1 is requested, init the connection and wait for user callback
if passphrase == "" {
// If we're already waiting for a PIN entry, insta-return
if w.pinwait {
return ErrTrezorPINNeeded
}
// Initialize a connection to the device
features := new(trezor.Features)
if _, err := w.trezorExchange(&trezor.Initialize{}, features); err != nil {
return err
}
w.version = [3]uint32{features.GetMajorVersion(), features.GetMinorVersion(), features.GetPatchVersion()}
w.label = features.GetLabel()
// Do a manual ping, forcing the device to ask for its PIN
askPin := true
res, err := w.trezorExchange(&trezor.Ping{PinProtection: &askPin}, new(trezor.PinMatrixRequest), new(trezor.Success))
if err != nil {
return err
}
// Only return the PIN request if the device wasn't already unlocked
if res == 1 {
return nil // Device responded with trezor.Success
}
w.pinwait = true
return ErrTrezorPINNeeded
}
// Phase 2 requested with actual PIN entry
w.pinwait = false
if _, err := w.trezorExchange(&trezor.PinMatrixAck{Pin: &passphrase}, new(trezor.Success)); err != nil {
w.failure = err
return err
}
return nil
}
// Close implements usbwallet.driver, cleaning up any metadata maintained within
// the Trezor driver.
func (w *trezorDriver) Close() error {
w.version, w.label, w.pinwait = [3]uint32{}, "", false
return nil
}
// Heartbeat implements usbwallet.driver, performing a sanity check against the
// Trezor to see if it's still online.
func (w *trezorDriver) Heartbeat() error {
if _, err := w.trezorExchange(&trezor.Ping{}, new(trezor.Success)); err != nil {
w.failure = err
return err
}
return nil
}
// Derive implements usbwallet.driver, sending a derivation request to the Trezor
// and returning the Ethereum address located on that derivation path.
func (w *trezorDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
return w.trezorDerive(path)
}
// SignTx implements usbwallet.driver, sending the transaction to the Trezor and
// waiting for the user to confirm or deny the transaction.
func (w *trezorDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
if w.device == nil {
return common.Address{}, nil, accounts.ErrWalletClosed
}
return w.trezorSign(path, tx, chainID)
}
// trezorDerive sends a derivation request to the Trezor device and returns the
// Ethereum address located on that path.
func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, error) {
address := new(trezor.EthereumAddress)
if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil {
return common.Address{}, err
}
return common.BytesToAddress(address.GetAddress()), nil
}
// trezorSign sends the transaction to the Trezor wallet, and waits for the user
// to confirm or deny the transaction.
func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
// Create the transaction initiation message
data := tx.Data()
length := uint32(len(data))
request := &trezor.EthereumSignTx{
AddressN: derivationPath,
Nonce: new(big.Int).SetUint64(tx.Nonce()).Bytes(),
GasPrice: tx.GasPrice().Bytes(),
GasLimit: tx.Gas().Bytes(),
Value: tx.Value().Bytes(),
DataLength: &length,
}
if to := tx.To(); to != nil {
request.To = (*to)[:] // Non contract deploy, set recipient explicitly
}
if length > 1024 { // Send the data chunked if that was requested
request.DataInitialChunk, data = data[:1024], data[1024:]
} else {
request.DataInitialChunk, data = data, nil
}
if chainID != nil { // EIP-155 transaction, set chain ID explicitly (only 32 bit is supported!?)
id := uint32(chainID.Int64())
request.ChainId = &id
}
// Send the initiation message and stream content until a signature is returned
response := new(trezor.EthereumTxRequest)
if _, err := w.trezorExchange(request, response); err != nil {
return common.Address{}, nil, err
}
for response.DataLength != nil && int(*response.DataLength) <= len(data) {
chunk := data[:*response.DataLength]
data = data[*response.DataLength:]
if _, err := w.trezorExchange(&trezor.EthereumTxAck{DataChunk: chunk}, response); err != nil {
return common.Address{}, nil, err
}
}
// Extract the Ethereum signature and do a sanity validation
if len(response.GetSignatureR()) == 0 || len(response.GetSignatureS()) == 0 || response.GetSignatureV() == 0 {
return common.Address{}, nil, errors.New("reply lacks signature")
}
signature := append(append(response.GetSignatureR(), response.GetSignatureS()...), byte(response.GetSignatureV()))
// Create the correct signer and signature transform based on the chain ID
var signer types.Signer
if chainID == nil {
signer = new(types.HomesteadSigner)
} else {
signer = types.NewEIP155Signer(chainID)
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
}
// Inject the final signature into the transaction and sanity check the sender
signed, err := tx.WithSignature(signer, signature)
if err != nil {
return common.Address{}, nil, err
}
sender, err := types.Sender(signer, signed)
if err != nil {
return common.Address{}, nil, err
}
return sender, signed, nil
}
// trezorExchange performs a data exchange with the Trezor wallet, sending it a
// message and retrieving the response. If multiple responses are possible, the
// method will also return the index of the destination object used.
func (w *trezorDriver) trezorExchange(req proto.Message, results ...proto.Message) (int, error) {
// Construct the original message payload to chunk up
data, err := proto.Marshal(req)
if err != nil {
return 0, err
}
payload := make([]byte, 8+len(data))
copy(payload, []byte{0x23, 0x23})
binary.BigEndian.PutUint16(payload[2:], trezor.Type(req))
binary.BigEndian.PutUint32(payload[4:], uint32(len(data)))
copy(payload[8:], data)
// Stream all the chunks to the device
chunk := make([]byte, 64)
chunk[0] = 0x3f // Report ID magic number
for len(payload) > 0 {
// Construct the new message to stream, padding with zeroes if needed
if len(payload) > 63 {
copy(chunk[1:], payload[:63])
payload = payload[63:]
} else {
copy(chunk[1:], payload)
copy(chunk[1+len(payload):], make([]byte, 63-len(payload)))
payload = nil
}
// Send over to the device
w.log.Trace("Data chunk sent to the Trezor", "chunk", hexutil.Bytes(chunk))
if _, err := w.device.Write(chunk); err != nil {
return 0, err
}
}
// Stream the reply back from the wallet in 64 byte chunks
var (
kind uint16
reply []byte
)
for {
// Read the next chunk from the Trezor wallet
if _, err := io.ReadFull(w.device, chunk); err != nil {
return 0, err
}
w.log.Trace("Data chunk received from the Trezor", "chunk", hexutil.Bytes(chunk))
// Make sure the transport header matches
if chunk[0] != 0x3f || (len(reply) == 0 && (chunk[1] != 0x23 || chunk[2] != 0x23)) {
return 0, errTrezorReplyInvalidHeader
}
// If it's the first chunk, retrieve the reply message type and total message length
var payload []byte
if len(reply) == 0 {
kind = binary.BigEndian.Uint16(chunk[3:5])
reply = make([]byte, 0, int(binary.BigEndian.Uint32(chunk[5:9])))
payload = chunk[9:]
} else {
payload = chunk[1:]
}
// Append to the reply and stop when filled up
if left := cap(reply) - len(reply); left > len(payload) {
reply = append(reply, payload...)
} else {
reply = append(reply, payload[:left]...)
break
}
}
// Try to parse the reply into the requested reply message
if kind == uint16(trezor.MessageType_MessageType_Failure) {
// Trezor returned a failure, extract and return the message
failure := new(trezor.Failure)
if err := proto.Unmarshal(reply, failure); err != nil {
return 0, err
}
return 0, errors.New("trezor: " + failure.GetMessage())
}
if kind == uint16(trezor.MessageType_MessageType_ButtonRequest) {
// Trezor is waiting for user confirmation, ack and wait for the next message
return w.trezorExchange(&trezor.ButtonAck{}, results...)
}
for i, res := range results {
if trezor.Type(res) == kind {
return i, proto.Unmarshal(reply, res)
}
}
expected := make([]string, len(results))
for i, res := range results {
expected[i] = trezor.Name(trezor.Type(res))
}
return 0, fmt.Errorf("trezor: expected reply types %s, got %s", expected, trezor.Name(kind))
}
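// As a worked example (not part of the original change), an Initialize
// message with an empty payload is framed as a single report, assuming
// MessageType_Initialize is 0 as in the SatoshiLabs protobuf definitions:
//
// 3f | 23 23 | 00 00 | 00 00 00 00 | zero padding up to 64 bytes
// ID | magic | type | data length |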


@ -0,0 +1,562 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package usbwallet implements support for USB hardware wallets.
package usbwallet
import (
"context"
"fmt"
"io"
"math/big"
"sync"
"time"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/karalabe/hid"
)
// Maximum time between wallet health checks to detect USB unplugs.
const heartbeatCycle = time.Second
// Minimum time to wait between self derivation attempts, even if the user is
// requesting accounts like crazy.
const selfDeriveThrottling = time.Second
// driver defines the vendor-specific functionality hardware wallet instances
// must implement to allow using them with the wallet lifecycle management.
type driver interface {
// Status returns a textual status to aid the user in the current state of the
// wallet. It also returns an error indicating any failure the wallet might have
// encountered.
Status() (string, error)
// Open initializes access to a wallet instance. The passphrase parameter may
// or may not be used by the implementation of a particular wallet instance.
Open(device io.ReadWriter, passphrase string) error
// Close releases any resources held by an open wallet instance.
Close() error
// Heartbeat performs a sanity check against the hardware wallet to see if it
// is still online and healthy.
Heartbeat() error
// Derive sends a derivation request to the USB device and returns the Ethereum
// address located on that path.
Derive(path accounts.DerivationPath) (common.Address, error)
// SignTx sends the transaction to the USB device and waits for the user to confirm
// or deny the transaction.
SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error)
}
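// noopDriver is an illustrative sketch (not part of the original change) of
// the minimal surface a vendor driver must cover; every method is a stub that
// reports the device as unusable.
type noopDriver struct{}

func (d *noopDriver) Status() (string, error) { return "Offline (stub)", nil }
func (d *noopDriver) Open(device io.ReadWriter, passphrase string) error { return nil }
func (d *noopDriver) Close() error { return nil }
func (d *noopDriver) Heartbeat() error { return nil }
func (d *noopDriver) Derive(path accounts.DerivationPath) (common.Address, error) {
return common.Address{}, accounts.ErrNotSupported
}
func (d *noopDriver) SignTx(path accounts.DerivationPath, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
return common.Address{}, nil, accounts.ErrNotSupported
}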
// wallet represents the common functionality shared by all USB hardware
// wallets to prevent reimplementing the same complex maintenance mechanisms
// for different vendors.
type wallet struct {
hub *Hub // USB hub scanning
driver driver // Hardware implementation of the low level device operations
url *accounts.URL // Textual URL uniquely identifying this wallet
info hid.DeviceInfo // Known USB device infos about the wallet
device *hid.Device // USB device advertising itself as a hardware wallet
accounts []accounts.Account // List of derived accounts pinned on the hardware wallet
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations
deriveNextPath accounts.DerivationPath // Next derivation path for account auto-discovery
deriveNextAddr common.Address // Next derived account address for auto-discovery
deriveChain ethereum.ChainStateReader // Blockchain state reader to discover used accounts with
deriveReq chan chan struct{} // Channel to request a self-derivation on
deriveQuit chan chan error // Channel to terminate the self-deriver with
healthQuit chan chan error
// Locking a hardware wallet is a bit special. Since hardware devices are lower
// performing, any communication with them might take a non negligible amount of
// time. Worse still, waiting for user confirmation can take arbitrarily long,
// but exclusive communication must be upheld for its duration. Locking the
// entire wallet in the meantime, however, would stall any parts of the system
// that don't want to communicate, just read some state (e.g. list the accounts).
//
// As such, a hardware wallet needs two locks to function correctly. A state
// lock can be used to protect the wallet's software-side internal state, which
// must not be held exclusively during hardware communication. A communication
// lock can be used to achieve exclusive access to the device itself, this one
// however should allow "skipping" waiting for operations that might want to
// use the device but can also live without it (e.g. account self-derivation).
//
// Since we have two locks, it's important to know how to properly use them:
// - Communication requires the `device` to not change, so obtaining the
// commsLock should be done after having a stateLock.
// - Communication must not disable read access to the wallet state, so it
// must only ever hold a *read* lock to stateLock.
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
log log.Logger // Contextual logger to tag the base with its id
}
// URL implements accounts.Wallet, returning the URL of the USB hardware device.
func (w *wallet) URL() accounts.URL {
return *w.url // Immutable, no need for a lock
}
// Status implements accounts.Wallet, returning a custom status message from the
// underlying vendor-specific hardware wallet implementation.
func (w *wallet) Status() (string, error) {
w.stateLock.RLock() // No device communication, state lock is enough
defer w.stateLock.RUnlock()
status, failure := w.driver.Status()
if w.device == nil {
return "Closed", failure
}
return status, failure
}
// Open implements accounts.Wallet, attempting to open a USB connection to the
// hardware wallet.
func (w *wallet) Open(passphrase string) error {
w.stateLock.Lock() // State lock is enough since there's no connection yet at this point
defer w.stateLock.Unlock()
// If the device was already opened once, refuse to try again
if w.paths != nil {
return accounts.ErrWalletAlreadyOpen
}
// Make sure the actual device connection is done only once
if w.device == nil {
device, err := w.info.Open()
if err != nil {
return err
}
w.device = device
w.commsLock = make(chan struct{}, 1)
w.commsLock <- struct{}{} // Enable lock
}
// Delegate device initialization to the underlying driver
if err := w.driver.Open(w.device, passphrase); err != nil {
return err
}
// Connection successful, start life-cycle management
w.paths = make(map[common.Address]accounts.DerivationPath)
w.deriveReq = make(chan chan struct{})
w.deriveQuit = make(chan chan error)
w.healthQuit = make(chan chan error)
go w.heartbeat()
go w.selfDerive()
// Notify anyone listening for wallet events that a new device is accessible
go w.hub.updateFeed.Send(accounts.WalletEvent{Wallet: w, Kind: accounts.WalletOpened})
return nil
}
// heartbeat is a health check loop for the USB wallets to periodically verify
// whether they are still present or if they malfunctioned.
func (w *wallet) heartbeat() {
w.log.Debug("USB wallet health-check started")
defer w.log.Debug("USB wallet health-check stopped")
// Execute heartbeat checks until termination or error
var (
errc chan error
err error
)
for errc == nil && err == nil {
// Wait until termination is requested or the heartbeat cycle arrives
select {
case errc = <-w.healthQuit:
// Termination requested
continue
case <-time.After(heartbeatCycle):
// Heartbeat time
}
// Execute a tiny data exchange to see responsiveness
w.stateLock.RLock()
if w.device == nil {
// Terminated while waiting for the lock
w.stateLock.RUnlock()
continue
}
<-w.commsLock // Don't lock state while resolving version
err = w.driver.Heartbeat()
w.commsLock <- struct{}{}
w.stateLock.RUnlock()
if err != nil {
w.stateLock.Lock() // Lock state to tear the wallet down
w.close()
w.stateLock.Unlock()
}
// Ignore non-hardware-related errors
err = nil
}
// In case of error, wait for termination
if err != nil {
w.log.Debug("USB wallet health-check failed", "err", err)
errc = <-w.healthQuit
}
errc <- err
}
// Close implements accounts.Wallet, closing the USB connection to the device.
func (w *wallet) Close() error {
// Ensure the wallet was opened
w.stateLock.RLock()
hQuit, dQuit := w.healthQuit, w.deriveQuit
w.stateLock.RUnlock()
// Terminate the health checks
var herr error
if hQuit != nil {
errc := make(chan error)
hQuit <- errc
herr = <-errc // Save for later, we *must* close the USB
}
// Terminate the self-derivations
var derr error
if dQuit != nil {
errc := make(chan error)
dQuit <- errc
derr = <-errc // Save for later, we *must* close the USB
}
// Terminate the device connection
w.stateLock.Lock()
defer w.stateLock.Unlock()
w.healthQuit = nil
w.deriveQuit = nil
w.deriveReq = nil
if err := w.close(); err != nil {
return err
}
if herr != nil {
return herr
}
return derr
}
// close is the internal wallet closer that terminates the USB connection and
// resets all the fields to their defaults.
//
// Note, close assumes the state lock is held!
func (w *wallet) close() error {
// Allow duplicate closes, especially for health-check failures
if w.device == nil {
return nil
}
// Close the device, clear everything, then return
w.device.Close()
w.device = nil
w.accounts, w.paths = nil, nil
w.driver.Close()
return nil
}
// Accounts implements accounts.Wallet, returning the list of accounts pinned to
// the USB hardware wallet. If self-derivation was enabled, the account list is
// periodically expanded based on current chain state.
func (w *wallet) Accounts() []accounts.Account {
// Attempt self-derivation if it's running
reqc := make(chan struct{}, 1)
select {
case w.deriveReq <- reqc:
// Self-derivation request accepted, wait for it
<-reqc
default:
// Self-derivation offline, throttled or busy, skip
}
// Return whatever account list we ended up with
w.stateLock.RLock()
defer w.stateLock.RUnlock()
cpy := make([]accounts.Account, len(w.accounts))
copy(cpy, w.accounts)
return cpy
}
// selfDerive is an account derivation loop that upon request attempts to find
// new non-zero accounts.
func (w *wallet) selfDerive() {
w.log.Debug("USB wallet self-derivation started")
defer w.log.Debug("USB wallet self-derivation stopped")
// Execute self-derivations until termination or error
var (
reqc chan struct{}
errc chan error
err error
)
for errc == nil && err == nil {
// Wait until either derivation or termination is requested
select {
case errc = <-w.deriveQuit:
// Termination requested
continue
case reqc = <-w.deriveReq:
// Account discovery requested
}
// Derivation needs a chain and device access, skip if either unavailable
w.stateLock.RLock()
if w.device == nil || w.deriveChain == nil {
w.stateLock.RUnlock()
reqc <- struct{}{}
continue
}
select {
case <-w.commsLock:
default:
w.stateLock.RUnlock()
reqc <- struct{}{}
continue
}
// Device lock obtained, derive the next batch of accounts
var (
accs []accounts.Account
paths []accounts.DerivationPath
nextAddr = w.deriveNextAddr
nextPath = w.deriveNextPath
context = context.Background()
)
for empty := false; !empty; {
// Retrieve the next derived Ethereum account
if nextAddr == (common.Address{}) {
if nextAddr, err = w.driver.Derive(nextPath); err != nil {
w.log.Warn("USB wallet account derivation failed", "err", err)
break
}
}
// Check the account's status against the current chain state
var (
balance *big.Int
nonce uint64
)
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
if err != nil {
w.log.Warn("USB wallet balance retrieval failed", "err", err)
break
}
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
if err != nil {
w.log.Warn("USB wallet nonce retrieval failed", "err", err)
break
}
// If the next account is empty, stop self-derivation, but add it nonetheless
if balance.Sign() == 0 && nonce == 0 {
empty = true
}
// We've just self-derived a new account, start tracking it locally
path := make(accounts.DerivationPath, len(nextPath))
copy(path[:], nextPath[:])
paths = append(paths, path)
account := accounts.Account{
Address: nextAddr,
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
}
accs = append(accs, account)
// Display a log message to the user for new (or previously empty) accounts
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
w.log.Info("USB wallet discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
}
// Fetch the next potential account
if !empty {
nextAddr = common.Address{}
nextPath[len(nextPath)-1]++
}
}
// Self derivation complete, release device lock
w.commsLock <- struct{}{}
w.stateLock.RUnlock()
// Insert any accounts successfully derived
w.stateLock.Lock()
for i := 0; i < len(accs); i++ {
if _, ok := w.paths[accs[i].Address]; !ok {
w.accounts = append(w.accounts, accs[i])
w.paths[accs[i].Address] = paths[i]
}
}
// Shift the self-derivation forward
// TODO(karalabe): don't overwrite changes from wallet.SelfDerive
w.deriveNextAddr = nextAddr
w.deriveNextPath = nextPath
w.stateLock.Unlock()
// Notify the user of termination and loop after a bit of time (to avoid thrashing)
reqc <- struct{}{}
if err == nil {
select {
case errc = <-w.deriveQuit:
// Termination requested, abort
case <-time.After(selfDeriveThrottling):
// Waited enough, willing to self-derive again
}
}
}
// In case of error, wait for termination
if err != nil {
w.log.Debug("USB wallet self-derivation failed", "err", err)
errc = <-w.deriveQuit
}
errc <- err
}
// Contains implements accounts.Wallet, returning whether a particular account is
// or is not pinned into this wallet instance. Although we could attempt to resolve
// unpinned accounts, that would be a non-negligible hardware operation.
func (w *wallet) Contains(account accounts.Account) bool {
w.stateLock.RLock()
defer w.stateLock.RUnlock()
_, exists := w.paths[account.Address]
return exists
}
// Derive implements accounts.Wallet, deriving a new account at the specific
// derivation path. If pin is set to true, the account will be added to the list
// of tracked accounts.
func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
// Try to derive the actual account and update its URL if successful
w.stateLock.RLock() // Avoid device disappearing during derivation
if w.device == nil {
w.stateLock.RUnlock()
return accounts.Account{}, accounts.ErrWalletClosed
}
<-w.commsLock // Avoid concurrent hardware access
address, err := w.driver.Derive(path)
w.commsLock <- struct{}{}
w.stateLock.RUnlock()
// If an error occurred or no pinning was requested, return
if err != nil {
return accounts.Account{}, err
}
account := accounts.Account{
Address: address,
URL: accounts.URL{Scheme: w.url.Scheme, Path: fmt.Sprintf("%s/%s", w.url.Path, path)},
}
if !pin {
return account, nil
}
// Pinning needs to modify the state
w.stateLock.Lock()
defer w.stateLock.Unlock()
if _, ok := w.paths[address]; !ok {
w.accounts = append(w.accounts, account)
w.paths[address] = path
}
return account, nil
}
// SelfDerive implements accounts.Wallet, trying to discover accounts that the
// user has used previously (based on the chain state), but did not
// explicitly pin to the wallet manually. To avoid chain head monitoring, self
// derivation only runs during account listing (and even then throttled).
func (w *wallet) SelfDerive(base accounts.DerivationPath, chain ethereum.ChainStateReader) {
w.stateLock.Lock()
defer w.stateLock.Unlock()
w.deriveNextPath = make(accounts.DerivationPath, len(base))
copy(w.deriveNextPath[:], base[:])
w.deriveNextAddr = common.Address{}
w.deriveChain = chain
}
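// An illustrative sketch (not part of the original change) of how application
// code might enable self-derivation; ethclient.Client is assumed here as the
// ethereum.ChainStateReader implementation:
//
// client, _ := ethclient.Dial("ws://127.0.0.1:8546")
// w.SelfDerive(accounts.DefaultBaseDerivationPath, client)
// _ = w.Accounts() // listing triggers a throttled derivation round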
// SignHash implements accounts.Wallet, however signing arbitrary data is not
// supported for hardware wallets, so this method will always return an error.
func (w *wallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
return nil, accounts.ErrNotSupported
}
// SignTx implements accounts.Wallet. It sends the transaction over to the hardware
// wallet to request a confirmation from the user. It returns either the signed
// transaction or a failure if the user denied the transaction.
//
// Note, if the version of the Ethereum application running on the Ledger wallet is
// too old to sign EIP-155 transactions, but such is requested nonetheless, an error
// will be returned as opposed to silently signing in Homestead mode.
func (w *wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
w.stateLock.RLock() // Comms have own mutex, this is for the state fields
defer w.stateLock.RUnlock()
// If the wallet is closed, abort
if w.device == nil {
return nil, accounts.ErrWalletClosed
}
// Make sure the requested account is contained within
path, ok := w.paths[account.Address]
if !ok {
return nil, accounts.ErrUnknownAccount
}
// All info gathered and metadata checks out, request signing
<-w.commsLock
defer func() { w.commsLock <- struct{}{} }()
// Ensure the device isn't screwed with while user confirmation is pending
// TODO(karalabe): remove if hotplug lands on Windows
w.hub.commsLock.Lock()
w.hub.commsPend++
w.hub.commsLock.Unlock()
defer func() {
w.hub.commsLock.Lock()
w.hub.commsPend--
w.hub.commsLock.Unlock()
}()
// Sign the transaction and verify the sender to avoid hardware fault surprises
sender, signed, err := w.driver.SignTx(path, tx, chainID)
if err != nil {
return nil, err
}
if sender != account.Address {
return nil, fmt.Errorf("signer mismatch: expected %s, got %s", account.Address.Hex(), sender.Hex())
}
return signed, nil
}
// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary
// data is not supported for USB wallets, so this method will always return
// an error.
func (w *wallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
return w.SignHash(account, hash)
}
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
// transaction with the given account using passphrase as extra authentication.
// Since USB wallets don't rely on passphrases, these are silently ignored.
func (w *wallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
return w.SignTx(account, tx, chainID)
}


@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.3.windows-%GETH_ARCH%.zip
- 7z x go1.8.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.9.windows-%GETH_ARCH%.zip
- 7z x go1.9.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version

vendor/github.com/ethereum/go-ethereum/bmt/bmt.go generated vendored Normal file

@ -0,0 +1,562 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bmt provides a binary merkle tree implementation
package bmt
import (
"fmt"
"hash"
"io"
"strings"
"sync"
"sync/atomic"
)
/*
Binary Merkle Tree Hash is a hash function over arbitrary data chunks of limited size.
It is defined as the root hash of the binary Merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g. keccak256 SHA3).
It is used as the chunk hash function in swarm, which in turn is the basis for the
128-branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
The BMT is optimal for providing compact inclusion proofs, i.e. proving that a
segment is a substring of a chunk starting at a particular offset.
The size of the underlying segments is fixed at 32 bytes (called the resolution
of the BMT hash), the EVM word size, to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
Two implementations are provided:
* RefHasher is optimized for code simplicity and meant as a reference implementation
* Hasher is optimized for speed, taking advantage of concurrency with a minimalistic
  control structure to coordinate the concurrent routines
Hasher implements the ChunkHash interface as well as the standard Go hash.Hash interface.
*/
const (
// DefaultSegmentCount is the maximum number of segments of the underlying chunk
DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
// the maximum number of concurrent BMT hashing operations performed by the same hasher
DefaultPoolSize = 8
)
// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
type BaseHasher func() hash.Hash
// Hasher is a reusable hasher for fixed maximum size chunks representing a BMT.
// It implements the hash.Hash interface and
// reuses a pool of Trees for amortised memory allocation and resource control.
// It supports order-agnostic concurrent segment writes
// as well as sequential read and write,
// cannot be called concurrently on more than one chunk and
// can be further appended to after Sum.
// Reset gives back the Tree to the pool and is guaranteed to leave
// the tree and itself in a state reusable for hashing a new chunk.
type Hasher struct {
pool *TreePool // BMT resource pool
bmt *Tree // prebuilt BMT resource for flowcontrol and proofs
blocksize int // segment size (size of hash) also for hash.Hash
count int // segment count
size int // for hash.Hash same as hashsize
cur int // cursor position for rightmost currently open chunk
segment []byte // the rightmost open segment (not complete)
depth int // index of last level
result chan []byte // result channel
hash []byte // to record the result
max int32 // max segments for SegmentWriter interface
blockLength []byte // The block length that needs to be added in Sum
}
// New creates a reusable Hasher
// implements the hash.Hash interface
// pulls a new Tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
return &Hasher{
pool: p,
depth: depth(p.SegmentCount),
size: p.SegmentSize,
blocksize: p.SegmentSize,
count: p.SegmentCount,
result: make(chan []byte),
}
}
// Node is a reusable segment hasher representing a node in a BMT
// it allows for continued writes after a Sum
// and is left in completely reusable state after Reset
type Node struct {
level, index int // position of node for information/logging only
initial bool // first and last node
root bool // whether the node is root to a smaller BMT
isLeft bool // whether it is left side of the parent double segment
unbalanced bool // indicates if a node has only the left segment
parent *Node // BMT connections
state int32 // atomic increment impl concurrent boolean toggle
left, right []byte
}
// NewNode constructor for segment hasher nodes in the BMT
func NewNode(level, index int, parent *Node) *Node {
return &Node{
parent: parent,
level: level,
index: index,
initial: index == 0,
isLeft: index%2 == 0,
}
}
// TreePool provides a pool of Trees used as resources by Hasher
// a Tree popped from the pool is guaranteed to have clean state
// for hashing a new chunk
// Hasher Reset releases the Tree to the pool
type TreePool struct {
lock sync.Mutex
c chan *Tree
hasher BaseHasher
SegmentSize int
SegmentCount int
Capacity int
count int
}
// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity.
// On Reserve it reuses free Trees or creates a new one if capacity has not been reached.
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
return &TreePool{
c: make(chan *Tree, capacity),
hasher: hasher,
SegmentSize: hasher().Size(),
SegmentCount: segmentCount,
Capacity: capacity,
}
}
// Drain drains the pool until it has no more than n resources
func (self *TreePool) Drain(n int) {
self.lock.Lock()
defer self.lock.Unlock()
for len(self.c) > n {
<-self.c
self.count--
}
}
// Reserve blocks until it can return an available Tree;
// it reuses free Trees or creates a new one if capacity has not been reached.
func (self *TreePool) Reserve() *Tree {
self.lock.Lock()
defer self.lock.Unlock()
var t *Tree
if self.count == self.Capacity {
return <-self.c
}
select {
case t = <-self.c:
default:
t = NewTree(self.hasher, self.SegmentSize, self.SegmentCount)
self.count++
}
return t
}
// Release gives back a Tree to the pool.
// This Tree is guaranteed to be in a reusable state
// and does not need locking.
func (self *TreePool) Release(t *Tree) {
self.c <- t // can never fail but...
}
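// An illustrative sketch (not part of the original change) of the pool life
// cycle; sha3.NewKeccak256 from go-ethereum's crypto/sha3 package is assumed
// as the BaseHasher:
//
// pool := NewTreePool(sha3.NewKeccak256, DefaultSegmentCount, DefaultPoolSize)
// t := pool.Reserve() // blocks once Capacity trees are checked out
// defer pool.Release(t)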
// Tree is a reusable control structure representing a BMT
// organised in a binary tree
// Hasher uses a TreePool to pick one for each chunk hash
// the Tree is 'locked' while not in the pool
type Tree struct {
leaves []*Node
}
// Draw draws the BMT (badly)
func (self *Tree) Draw(hash []byte, d int) string {
var left, right []string
var anc []*Node
for i, n := range self.leaves {
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
if i%2 == 0 {
anc = append(anc, n.parent)
}
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
}
anc = self.leaves
var hashes [][]string
for l := 0; len(anc) > 0; l++ {
var nodes []*Node
hash := []string{""}
for i, n := range anc {
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
if i%2 == 0 && n.parent != nil {
nodes = append(nodes, n.parent)
}
}
hash = append(hash, "")
hashes = append(hashes, hash)
anc = nodes
}
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
total := 60
del := " "
var rows []string
for i := len(hashes) - 1; i >= 0; i-- {
var textlen int
hash := hashes[i]
for _, s := range hash {
textlen += len(s)
}
if total < textlen {
total = textlen + len(hash)
}
delsize := (total - textlen) / (len(hash) - 1)
if delsize > len(del) {
delsize = len(del)
}
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
rows = append(rows, row)
}
rows = append(rows, strings.Join(left, " "))
rows = append(rows, strings.Join(right, " "))
return strings.Join(rows, "\n") + "\n"
}
// NewTree initialises the Tree by building up the nodes of a BMT
// segment size is stipulated to be the size of the hash
// segmentCount needs to be a positive integer and does not need to be
// a power of two; it can even be an odd number
// segmentSize * segmentCount determines the maximum chunk size
// hashed using the tree
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
n := NewNode(0, 0, nil)
n.root = true
prevlevel := []*Node{n}
// iterate over the levels and create 2^level nodes at each
level := 1
count := 2
for d := 1; d <= depth(segmentCount); d++ {
nodes := make([]*Node, count)
for i := 0; i < len(nodes); i++ {
var parent *Node
parent = prevlevel[i/2]
t := NewNode(level, i, parent)
nodes[i] = t
}
prevlevel = nodes
level++
count *= 2
}
// the datanode level is the nodes on the last level; these become the tree's leaves
return &Tree{
leaves: prevlevel,
}
}
// methods needed by hash.Hash
// Size returns the hash size in bytes
func (self *Hasher) Size() int {
return self.size
}
// BlockSize returns the block size
func (self *Hasher) BlockSize() int {
return self.blocksize
}
// Sum returns the hash of the buffer
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
func (self *Hasher) Sum(b []byte) (r []byte) {
t := self.bmt
i := self.cur
n := t.leaves[i]
j := i
// must run strictly before all nodes calculate
// datanodes are guaranteed to have a parent
if len(self.segment) > self.size && i > 0 && n.parent != nil {
n = n.parent
} else {
i *= 2
}
d := self.finalise(n, i)
self.writeSegment(j, self.segment, d)
c := <-self.result
self.releaseTree()
// sha3(length + BMT(pure_chunk))
if self.blockLength == nil {
return c
}
res := self.pool.hasher()
res.Reset()
res.Write(self.blockLength)
res.Write(c)
return res.Sum(nil)
}
// Hasher implements the SwarmHash interface
// Hash waits for the hasher result and returns it
// caller must call this on a BMT Hasher being written to
func (self *Hasher) Hash() []byte {
return <-self.result
}
// Hasher implements the io.Writer interface
// Write fills the buffer to hash
// with every full segment it completes, it launches a hasher goroutine
// that shoots up the BMT
func (self *Hasher) Write(b []byte) (int, error) {
l := len(b)
if l <= 0 {
return 0, nil
}
s := self.segment
i := self.cur
count := (self.count + 1) / 2
need := self.count*self.size - self.cur*2*self.size
size := self.size
if need > size {
size *= 2
}
if l < need {
need = l
}
// calculate missing bit to complete current open segment
rest := size - len(s)
if need < rest {
rest = need
}
s = append(s, b[:rest]...)
need -= rest
// read full segments and the last possibly partial segment
for need > 0 && i < count-1 {
// push all finished chunks we read
self.writeSegment(i, s, self.depth)
need -= size
if need < 0 {
size += need
}
s = b[rest : rest+size]
rest += size
i++
}
self.segment = s
self.cur = i
// otherwise, we can assume len(s) == 0, so the whole buffer has been read and the chunk is not yet full
return l, nil
}
// Hasher implements the io.ReaderFrom interface
// ReadFrom reads from io.Reader and appends to the data to be hashed using Write
// it reads until the chunk to hash reaches maximum length or the reader reaches EOF
// caller must Reset the hasher prior to call
func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
bufsize := self.size*self.count - self.size*self.cur - len(self.segment)
buf := make([]byte, bufsize)
var read int
for {
var n int
n, err = r.Read(buf)
read += n
if err == io.EOF || read == len(buf) {
hash := self.Sum(buf[:n])
if read == len(buf) {
err = NewEOC(hash)
}
break
}
if err != nil {
break
}
n, err = self.Write(buf[:n])
if err != nil {
break
}
}
return int64(read), err
}
// Reset needs to be called before writing to the hasher
func (self *Hasher) Reset() {
self.getTree()
self.blockLength = nil
}
// Hasher implements the SwarmHash interface
// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash
func (self *Hasher) ResetWithLength(l []byte) {
self.Reset()
self.blockLength = l
}
// releaseTree gives the Tree back to the pool, whereby it unlocks;
// it resets the tree, segment and cursor
func (self *Hasher) releaseTree() {
if self.bmt != nil {
n := self.bmt.leaves[self.cur]
for ; n != nil; n = n.parent {
n.unbalanced = false
if n.parent != nil {
n.root = false
}
}
self.pool.Release(self.bmt)
self.bmt = nil
}
self.cur = 0
self.segment = nil
}
func (self *Hasher) writeSegment(i int, s []byte, d int) {
h := self.pool.hasher()
n := self.bmt.leaves[i]
if len(s) > self.size && n.parent != nil {
go func() {
h.Reset()
h.Write(s)
s = h.Sum(nil)
if n.root {
self.result <- s
return
}
self.run(n.parent, h, d, n.index, s)
}()
return
}
go self.run(n, h, d, i*2, s)
}
func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
isLeft := i%2 == 0
for {
if isLeft {
n.left = s
} else {
n.right = s
}
if !n.unbalanced && n.toggle() {
return
}
if !n.unbalanced || !isLeft || i == 0 && d == 0 {
h.Reset()
h.Write(n.left)
h.Write(n.right)
s = h.Sum(nil)
} else {
s = append(n.left, n.right...)
}
self.hash = s
if n.root {
self.result <- s
return
}
isLeft = n.isLeft
n = n.parent
i++
}
}
// getTree obtains a BMT resource by reserving one from the pool
func (self *Hasher) getTree() *Tree {
if self.bmt != nil {
return self.bmt
}
t := self.pool.Reserve()
self.bmt = t
return t
}
// toggle implements a concurrent reusable 2-state object:
// an atomic AddInt32 taken mod 2 acts as an atomic bool toggle
// it returns true if the caller just put the node into the active/waiting state
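// in run above this pairs concurrent writers: the first of a node's two
// children to finish parks its digest and returns (toggle reports true),
// while the second sees false and combines left and right, one level up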
func (self *Node) toggle() bool {
return atomic.AddInt32(&self.state, 1)%2 == 1
}
func hashstr(b []byte) string {
end := len(b)
if end > 4 {
end = 4
}
return fmt.Sprintf("%x", b[:end])
}
func depth(n int) (d int) {
for l := (n - 1) / 2; l > 0; l /= 2 {
d++
}
return d
}
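A worked example of the arithmetic: for the default 128 segments the loop starts at l = (128-1)/2 = 63 and halves six times (63, 31, 15, 7, 3, 1) before hitting zero, so depth(128) == 6: the 128 segments pair into 64 leaf nodes, and the tree needs exactly six doublings from the root to reach 64 leaves.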
// finalise follows the zigzag path on the tree belonging
// to the final data segment
func (self *Hasher) finalise(n *Node, i int) (d int) {
isLeft := i%2 == 0
for {
// when the final segment's path goes via left segments
// the incoming data is pushed to the parent upon pulling the left;
// we do not need to toggle the state since this condition is
// detectable
n.unbalanced = isLeft
n.right = nil
if n.initial {
n.root = true
return d
}
isLeft = n.isLeft
n = n.parent
d++
}
}
// EOC (end of chunk) implements the error interface
type EOC struct {
Hash []byte // read the hash of the chunk off the error
}
// Error returns the error string
func (self *EOC) Error() string {
return fmt.Sprintf("hasher limit reached, chunk hash: %x", self.Hash)
}
// NewEOC creates new end of chunk error with the hash
func NewEOC(hash []byte) *EOC {
return &EOC{hash}
}
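A sketch of how a caller might consume ReadFrom together with the EOC sentinel (a usage pattern inferred from the code above, not documented; it assumes the "fmt" and "io" imports plus a Hasher from the pool sketch earlier): a completely filled chunk surfaces as a *EOC error carrying the finished hash, while other errors, including io.EOF on a short final read, pass through.
// consumeChunk reads at most one chunk's worth of r into h.
// Hypothetical helper for illustration only.
func consumeChunk(h *bmt.Hasher, r io.Reader) error {
	h.Reset()
	n, err := h.ReadFrom(r)
	if eoc, ok := err.(*bmt.EOC); ok {
		// a full chunk was read: the hash rides along on the error
		fmt.Printf("full chunk (%d bytes): %x\n", n, eoc.Hash)
		return nil
	}
	return err
}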

85
vendor/github.com/ethereum/go-ethereum/bmt/bmt_r.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// simple non-concurrent reference implementation for hash-size segment based
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any parallelism and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// see testBMTHasherCorrectness function in bmt_test.go
package bmt
import (
"hash"
)
// RefHasher is the non-optimized easy to read reference implementation of BMT
type RefHasher struct {
span int
section int
cap int
h hash.Hash
}
// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
h := hasher()
hashsize := h.Size()
maxsize := hashsize * count
c := 2
for ; c < count; c *= 2 {
}
if c > 2 {
c /= 2
}
return &RefHasher{
section: 2 * hashsize,
span: c * hashsize,
cap: maxsize,
h: h,
}
}
// Hash returns the BMT hash of the byte slice
// implements the SwarmHash interface
func (rh *RefHasher) Hash(d []byte) []byte {
if len(d) > rh.cap {
d = d[:rh.cap]
}
return rh.hash(d, rh.span)
}
func (rh *RefHasher) hash(d []byte, s int) []byte {
l := len(d)
left := d
var right []byte
if l > rh.section {
for ; s >= l; s /= 2 {
}
left = rh.hash(d[:s], s)
right = d[s:]
if l-s > rh.section/2 {
right = rh.hash(right, s)
}
}
defer rh.h.Reset()
rh.h.Write(left)
rh.h.Write(right)
h := rh.h.Sum(nil)
return h
}
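In the spirit of the testBMTHasherCorrectness function referenced above, a minimal cross-check sketch (assuming data fits in a single chunk; imports needed are "bytes", "testing" and the bmt and sha3 packages) would compare RefHasher against the optimized Hasher:
func testBMTAgainstReference(t *testing.T, data []byte) {
	pool := bmt.NewTreePool(sha3.NewKeccak256, 128, 1)
	ref := bmt.NewRefHasher(sha3.NewKeccak256, 128)
	want := ref.Hash(data) // slow but obviously correct oracle

	h := bmt.New(pool)
	h.Reset()
	h.Write(data)
	got := h.Sum(nil)
	if !bytes.Equal(got, want) {
		t.Fatalf("BMT mismatch: have %x, want %x", got, want)
	}
}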

View File

@ -21,18 +21,18 @@ variable which Travis CI makes available to certain builds.
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
version that is available in the main Ubuntu repository. In order to make this possible,
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
golang-1.8, which is co-installable alongside the regular golang package. PPA dependencies
golang-1.9, which is co-installable alongside the regular golang package. PPA dependencies
can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
## Building Packages Locally (for testing)
You need to run Ubuntu to do test packaging.
Add the gophers PPA and install Go 1.8 and Debian packaging tools:
Add the gophers PPA and install Go 1.9 and Debian packaging tools:
$ sudo apt-add-repository ppa:gophers/ubuntu/archive
$ sudo apt-get update
$ sudo apt-get install build-essential golang-1.8 devscripts debhelper
$ sudo apt-get install build-essential golang-1.9 devscripts debhelper
Create the source packages:

View File

@ -119,7 +119,8 @@ var (
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: wily is unsupported because it was officially deprecated on launchpad.
debDistros = []string{"trusty", "xenial", "yakkety", "zesty"}
// Note: yakkety is unsupported because it was officially deprecated on launchpad.
debDistros = []string{"trusty", "xenial", "zesty"}
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -249,10 +250,7 @@ func goTool(subcmd string, args ...string) *exec.Cmd {
}
func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
gocmd := filepath.Join(runtime.GOROOT(), "bin", "go")
cmd := exec.Command(gocmd, subcmd)
cmd.Args = append(cmd.Args, args...)
cmd := build.GoTool(subcmd, args...)
if subcmd == "build" || subcmd == "install" || subcmd == "test" {
// Go CGO has a Windows linker error prior to 1.8 (https://github.com/golang/go/issues/8756).
// Work around issue by allowing multiple definitions for <1.8 builds.

View File

@ -2,7 +2,7 @@ Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.8
Build-Depends: debhelper (>= 8.0.0), golang-1.9
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git

View File

@ -5,7 +5,7 @@
#export DH_VERBOSE=1
override_dh_auto_build:
build/env.sh /usr/lib/go-1.8/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
build/env.sh /usr/lib/go-1.9/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
override_dh_auto_test:

View File

@ -46,8 +46,5 @@ func disasmCmd(ctx *cli.Context) error {
code := strings.TrimSpace(string(in[:]))
fmt.Printf("%v\n", code)
if err = asm.PrintDisassembled(code); err != nil {
return err
}
return nil
return asm.PrintDisassembled(code)
}

View File

@ -57,11 +57,15 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
}
// CaptureEnd is triggered at end of execution.
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration) error {
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
type endLog struct {
Output string `json:"output"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
Time time.Duration `json:"time"`
Err string `json:"error,omitempty"`
}
return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t})
if err != nil {
return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, err.Error()})
}
return l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""})
}

View File

@ -53,7 +53,7 @@ var (
}
CodeFileFlag = cli.StringFlag{
Name: "codefile",
Usage: "file containing EVM code",
Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
}
GasFlag = cli.Uint64Flag{
Name: "gas",
@ -102,6 +102,10 @@ var (
Name: "sender",
Usage: "The transaction origin",
}
ReceiverFlag = cli.StringFlag{
Name: "receiver",
Usage: "The transaction receiver (execution context)",
}
DisableMemoryFlag = cli.BoolFlag{
Name: "nomemory",
Usage: "disable memory output",
@ -131,6 +135,7 @@ func init() {
GenesisFlag,
MachineFlag,
SenderFlag,
ReceiverFlag,
DisableMemoryFlag,
DisableStackFlag,
}
@ -138,6 +143,7 @@ func init() {
compileCommand,
disasmCommand,
runCommand,
stateTestCommand,
}
}

View File

@ -84,6 +84,7 @@ func runCmd(ctx *cli.Context) error {
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.StringToAddress("sender")
receiver = common.StringToAddress("receiver")
)
if ctx.GlobalBool(MachineFlag.Name) {
tracer = NewJSONLogger(logconfig, os.Stdout)
@ -104,46 +105,52 @@ func runCmd(ctx *cli.Context) error {
if ctx.GlobalString(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
}
statedb.CreateAccount(sender)
if ctx.GlobalString(ReceiverFlag.Name) != "" {
receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name))
}
var (
code []byte
ret []byte
err error
)
if fn := ctx.Args().First(); len(fn) > 0 {
// The '--code' or '--codefile' flag overrides code in state
if ctx.GlobalString(CodeFileFlag.Name) != "" {
var hexcode []byte
var err error
// If - is specified, it means that code comes from stdin
if ctx.GlobalString(CodeFileFlag.Name) == "-" {
//Try reading from stdin
if hexcode, err = ioutil.ReadAll(os.Stdin); err != nil {
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
} else {
// Codefile with hex assembly
if hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name)); err != nil {
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
}
code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
} else if ctx.GlobalString(CodeFlag.Name) != "" {
code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
} else if fn := ctx.Args().First(); len(fn) > 0 {
// EASM-file to compile
src, err := ioutil.ReadFile(fn)
if err != nil {
return err
}
bin, err := compiler.Compile(fn, src, false)
if err != nil {
return err
}
code = common.Hex2Bytes(bin)
} else if ctx.GlobalString(CodeFlag.Name) != "" {
code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
} else {
var hexcode []byte
if ctx.GlobalString(CodeFileFlag.Name) != "" {
var err error
hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name))
if err != nil {
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
} else {
var err error
hexcode, err = ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
}
code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
}
initialGas := ctx.GlobalUint64(GasFlag.Name)
runtimeConfig := runtime.Config{
Origin: sender,
@ -180,9 +187,9 @@ func runCmd(ctx *cli.Context) error {
input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
} else {
receiver := common.StringToAddress("receiver")
statedb.SetCode(receiver, code)
if len(code) > 0 {
statedb.SetCode(receiver, code)
}
ret, leftOverGas, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
}
execTime := time.Since(tstart)
@ -227,13 +234,13 @@ Gas used: %d
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
}
if tracer != nil {
tracer.CaptureEnd(ret, initialGas-leftOverGas, execTime)
tracer.CaptureEnd(ret, initialGas-leftOverGas, execTime, err)
} else {
fmt.Printf("0x%x\n", ret)
if err != nil {
fmt.Printf(" error: %v\n", err)
}
}
if err != nil {
fmt.Printf(" error: %v\n", err)
}
return nil
}

View File

@ -0,0 +1,119 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/tests"
cli "gopkg.in/urfave/cli.v1"
)
var stateTestCommand = cli.Command{
Action: stateTestCmd,
Name: "statetest",
Usage: "executes the given state tests",
ArgsUsage: "<file>",
}
type StatetestResult struct {
Name string `json:"name"`
Pass bool `json:"pass"`
Fork string `json:"fork"`
Error string `json:"error,omitempty"`
State *state.Dump `json:"state,omitempty"`
}
func stateTestCmd(ctx *cli.Context) error {
if len(ctx.Args().First()) == 0 {
return errors.New("path-to-test argument required")
}
// Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
// Configure the EVM logger
config := &vm.LogConfig{
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
}
var (
tracer vm.Tracer
debugger *vm.StructLogger
)
switch {
case ctx.GlobalBool(MachineFlag.Name):
tracer = NewJSONLogger(config, os.Stderr)
case ctx.GlobalBool(DebugFlag.Name):
debugger = vm.NewStructLogger(config)
tracer = debugger
default:
debugger = vm.NewStructLogger(config)
}
// Load the test content from the input file
src, err := ioutil.ReadFile(ctx.Args().First())
if err != nil {
return err
}
var tests map[string]tests.StateTest
if err = json.Unmarshal(src, &tests); err != nil {
return err
}
// Iterate over all the tests, run them and aggregate the results
cfg := vm.Config{
Tracer: tracer,
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
}
results := make([]StatetestResult, 0, len(tests))
for key, test := range tests {
for _, st := range test.Subtests() {
// Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
if state, err := test.Run(st, cfg); err != nil {
// Test failed, mark as so and dump any state to aid debugging
result.Pass, result.Error = false, err.Error()
if ctx.GlobalBool(DumpFlag.Name) && state != nil {
dump := state.RawDump()
result.State = &dump
}
}
results = append(results, *result)
// Print any structured logs collected
if ctx.GlobalBool(DebugFlag.Name) {
if debugger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
vm.WriteTrace(os.Stderr, debugger.StructLogs())
}
}
}
}
out, _ := json.MarshalIndent(results, "", " ")
fmt.Println(string(out))
return nil
}
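In use (a sketch inferred from the flags wired up above, not from any documentation): evm statetest tests.json prints a JSON array of StatetestResult records to stdout; adding --debug emits the structured EVM trace on stderr, and --dump attaches the post-state of failing cases to the result.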

View File

@ -413,8 +413,9 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
// Iterate over all the files and look for Ethereum addresses
var address common.Address
for _, file := range gist.Files {
if len(file.Content) == 2+common.AddressLength*2 {
address = common.HexToAddress(file.Content)
content := strings.TrimSpace(file.Content)
if len(content) == 2+common.AddressLength*2 {
address = common.HexToAddress(content)
}
}
if address == (common.Address{}) {

View File

@ -263,23 +263,6 @@ func getPassPhrase(prompt string, confirmation bool, i int, passwords []string)
return password
}
// getWhisperYesNo retrieves an indication of whether or not the user wants the created
// account to also be enabled as a whisper identity
func getWhisperYesNo(prompt string) bool {
// prompt the user for the whisper preference
if prompt != "" {
fmt.Println(prompt)
}
shhRes, err := console.Stdin.PromptConfirm("Enable the new account as a Whisper Identity?")
if err != nil {
utils.Fatalf("Failed to read response: %v", err)
}
return shhRes
}
func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrError, auth string) accounts.Account {
fmt.Printf("Multiple key files exist for address %x:\n", err.Addr)
for _, a := range err.Matches {
@ -310,10 +293,9 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr
func accountCreate(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
whisperEnabled := getWhisperYesNo("You can also choose to enable your new account as a Whisper identity.")
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
account, err := ks.NewAccount(password, whisperEnabled)
account, err := ks.NewAccount(password)
if err != nil {
utils.Fatalf("Failed to create account: %v", err)
}

View File

@ -21,6 +21,7 @@ import (
"fmt"
"os"
"runtime"
"sort"
"strings"
"time"
@ -67,6 +68,8 @@ var (
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
utils.TxPoolPriceLimitFlag,
utils.TxPoolPriceBumpFlag,
utils.TxPoolAccountSlotsFlag,
@ -155,6 +158,7 @@ func init() {
attachCommand,
javascriptCommand,
// See misccmd.go:
makecacheCommand,
makedagCommand,
versionCommand,
bugCommand,
@ -162,6 +166,7 @@ func init() {
// See config.go
dumpConfigCommand,
}
sort.Sort(cli.CommandsByName(app.Commands))
app.Flags = append(app.Flags, nodeFlags...)
app.Flags = append(app.Flags, rpcFlags...)
@ -234,24 +239,30 @@ func startNode(ctx *cli.Context, stack *node.Node) {
}
stateReader := ethclient.NewClient(rpcClient)
// Open and self derive any wallets already attached
// Open any wallets already attached
for _, wallet := range stack.AccountManager().Wallets() {
if err := wallet.Open(""); err != nil {
log.Warn("Failed to open wallet", "url", wallet.URL(), "err", err)
} else {
wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
}
}
// Listen for wallet event till termination
for event := range events {
if event.Arrive {
switch event.Kind {
case accounts.WalletArrived:
if err := event.Wallet.Open(""); err != nil {
log.Warn("New wallet appeared, failed to open", "url", event.Wallet.URL(), "err", err)
}
case accounts.WalletOpened:
status, _ := event.Wallet.Status()
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status)
if event.Wallet.URL().Scheme == "ledger" {
event.Wallet.SelfDerive(accounts.DefaultLedgerBaseDerivationPath, stateReader)
} else {
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", event.Wallet.Status())
event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
}
} else {
case accounts.WalletDropped:
log.Info("Old wallet dropped", "url", event.Wallet.URL())
event.Wallet.Close()
}

View File

@ -18,9 +18,7 @@ package main
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
@ -33,14 +31,27 @@ import (
)
var (
makedagCommand = cli.Command{
Action: utils.MigrateFlags(makedag),
Name: "makedag",
Usage: "Generate ethash DAG (for testing)",
makecacheCommand = cli.Command{
Action: utils.MigrateFlags(makecache),
Name: "makecache",
Usage: "Generate ethash verification cache (for testing)",
ArgsUsage: "<blockNum> <outputDir>",
Category: "MISCELLANEOUS COMMANDS",
Description: `
The makedag command generates an ethash DAG in /tmp/dag.
The makecache command generates an ethash cache in <outputDir>.
This command exists to support the system testing project.
Regular users do not need to execute it.
`,
}
makedagCommand = cli.Command{
Action: utils.MigrateFlags(makedag),
Name: "makedag",
Usage: "Generate ethash mining DAG (for testing)",
ArgsUsage: "<blockNum> <outputDir>",
Category: "MISCELLANEOUS COMMANDS",
Description: `
The makedag command generates an ethash DAG in <outputDir>.
This command exists to support the system testing project.
Regular users do not need to execute it.
@ -65,33 +76,33 @@ The output of this command is supposed to be machine-readable.
}
)
// makecache generates an ethash verification cache into the provided folder.
func makecache(ctx *cli.Context) error {
args := ctx.Args()
if len(args) != 2 {
utils.Fatalf(`Usage: geth makecache <block number> <outputdir>`)
}
block, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
utils.Fatalf("Invalid block number: %v", err)
}
ethash.MakeCache(block, args[1])
return nil
}
// makedag generates an ethash mining DAG into the provided folder.
func makedag(ctx *cli.Context) error {
args := ctx.Args()
wrongArgs := func() {
if len(args) != 2 {
utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
}
switch {
case len(args) == 2:
blockNum, err := strconv.ParseUint(args[0], 0, 64)
dir := args[1]
if err != nil {
wrongArgs()
} else {
dir = filepath.Clean(dir)
// seems to require a trailing slash
if !strings.HasSuffix(dir, "/") {
dir = dir + "/"
}
_, err = ioutil.ReadDir(dir)
if err != nil {
utils.Fatalf("Can't find dir")
}
fmt.Println("making DAG, this could take awhile...")
ethash.MakeDataset(blockNum, dir)
}
default:
wrongArgs()
block, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
utils.Fatalf("Invalid block number: %v", err)
}
ethash.MakeDataset(block, args[1])
return nil
}

View File

@ -96,6 +96,8 @@ var AppHelpFlagGroups = []flagGroup{
Name: "TRANSACTION POOL",
Flags: []cli.Flag{
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
utils.TxPoolPriceLimitFlag,
utils.TxPoolPriceBumpFlag,
utils.TxPoolAccountSlotsFlag,

View File

@ -425,6 +425,11 @@ services:
- "{{.Port}}:80"{{else}}
environment:
- VIRTUAL_HOST={{.VHost}}{{end}}
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "10"
restart: always
`

View File

@ -42,7 +42,7 @@ RUN \
WORKDIR /eth-netstats
EXPOSE 3000
RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: []};' > lib/utils/config.js
RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: [{{.Banned}}], reserved: ["yournode"]};' > lib/utils/config.js
CMD ["npm", "start"]
`
@ -59,25 +59,37 @@ services:
- "{{.Port}}:3000"{{end}}
environment:
- WS_SECRET={{.Secret}}{{if .VHost}}
- VIRTUAL_HOST={{.VHost}}{{end}}
- VIRTUAL_HOST={{.VHost}}{{end}}{{if .Banned}}
- BANNED={{.Banned}}{{end}}
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "10"
restart: always
`
// deployEthstats deploys a new ethstats container to a remote machine via SSH,
// docker and docker-compose. If an instance with the specified network name
// already exists there, it will be overwritten!
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string) ([]byte, error) {
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string, banned []string) ([]byte, error) {
// Generate the content to upload to the server
workdir := fmt.Sprintf("%d", rand.Int63())
files := make(map[string][]byte)
trustedLabels := make([]string, len(trusted))
for i, address := range trusted {
trusted[i] = fmt.Sprintf("\"%s\"", address)
trustedLabels[i] = fmt.Sprintf("\"%s\"", address)
}
bannedLabels := make([]string, len(banned))
for i, address := range banned {
bannedLabels[i] = fmt.Sprintf("\"%s\"", address)
}
dockerfile := new(bytes.Buffer)
template.Must(template.New("").Parse(ethstatsDockerfile)).Execute(dockerfile, map[string]interface{}{
"Trusted": strings.Join(trusted, ", "),
"Trusted": strings.Join(trustedLabels, ", "),
"Banned": strings.Join(bannedLabels, ", "),
})
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
@ -87,6 +99,7 @@ func deployEthstats(client *sshClient, network string, port int, secret string,
"Port": port,
"Secret": secret,
"VHost": vhost,
"Banned": strings.Join(banned, ","),
})
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
@ -107,11 +120,12 @@ type ethstatsInfos struct {
port int
secret string
config string
banned []string
}
// String implements the stringer interface.
func (info *ethstatsInfos) String() string {
return fmt.Sprintf("host=%s, port=%d, secret=%s", info.host, info.port, info.secret)
return fmt.Sprintf("host=%s, port=%d, secret=%s, banned=%v", info.host, info.port, info.secret, info.banned)
}
// checkEthstats does a health-check against an ethstats server to verify whether
@ -145,6 +159,9 @@ func checkEthstats(client *sshClient, network string) (*ethstatsInfos, error) {
if port != 80 && port != 443 {
config += fmt.Sprintf(":%d", port)
}
// Retrieve the IP blacklist
banned := strings.Split(infos.envvars["BANNED"], ",")
// Run a sanity check to see if the port is reachable
if err = checkPort(host, port); err != nil {
log.Warn("Ethstats service seems unreachable", "server", host, "port", port, "err", err)
@ -155,5 +172,6 @@ func checkEthstats(client *sshClient, network string) (*ethstatsInfos, error) {
port: port,
secret: secret,
config: config,
banned: banned,
}, nil
}

View File

@ -82,6 +82,11 @@ services:
- CAPTCHA_SECRET={{.CaptchaSecret}}{{if .VHost}}
- VIRTUAL_HOST={{.VHost}}
- VIRTUAL_PORT=8080{{end}}
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "10"
restart: always
`

View File

@ -43,6 +43,11 @@ services:
- "{{.Port}}:80"
volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "10"
restart: always
`

View File

@ -30,7 +30,7 @@ import (
// nodeDockerfile is the Dockerfile required to run an Ethereum node.
var nodeDockerfile = `
FROM ethereum/client-go:alpine-develop
FROM ethereum/client-go:latest
ADD genesis.json /genesis.json
{{if .Unlock}}
@ -38,9 +38,9 @@ ADD genesis.json /genesis.json
ADD signer.pass /signer.pass
{{end}}
RUN \
echo '/geth init /genesis.json' > geth.sh && \{{if .Unlock}}
echo 'geth init /genesis.json' > geth.sh && \{{if .Unlock}}
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
echo $'/geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
ENTRYPOINT ["/bin/sh", "geth.sh"]
`
@ -68,6 +68,11 @@ services:
- MINER_NAME={{.Etherbase}}
- GAS_TARGET={{.GasTarget}}
- GAS_PRICE={{.GasPrice}}
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "10"
restart: always
`
@ -192,7 +197,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
// Container available, retrieve its node ID and its genesis json
var out []byte
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 /geth --exec admin.nodeInfo.id attach", network, kind)); err != nil {
if out, err = client.Run(fmt.Sprintf("docker exec %s_%s_1 geth --exec admin.nodeInfo.id attach", network, kind)); err != nil {
return nil, ErrServiceUnreachable
}
id := bytes.Trim(bytes.TrimSpace(out), "\"")

View File

@ -122,7 +122,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
}
}
// If a public key exists for this SSH server, check that it matches
if bytes.Compare(pubkey, key.Marshal()) == 0 {
if bytes.Equal(pubkey, key.Marshal()) {
return nil
}
// We have a mismatch, forbid connecting

View File

@ -22,6 +22,7 @@ import (
"fmt"
"io/ioutil"
"math/big"
"net"
"os"
"path/filepath"
"sort"
@ -106,17 +107,15 @@ func (w *wizard) readString() string {
// readDefaultString reads a single line from stdin, trimming it from spaces. If
// an empty line is entered, the default value is returned.
func (w *wizard) readDefaultString(def string) string {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.TrimSpace(text); text != "" {
return text
}
return def
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.TrimSpace(text); text != "" {
return text
}
return def
}
// readInt reads a single line from stdin, trimming it from spaces, enforcing it
@ -162,6 +161,7 @@ func (w *wizard) readDefaultInt(def int) int {
}
}
/*
// readFloat reads a single line from stdin, trimming it from spaces, enforcing it
// to parse into a float.
func (w *wizard) readFloat() float64 {
@ -182,6 +182,7 @@ func (w *wizard) readFloat() float64 {
return val
}
}
*/
// readDefaultFloat reads a single line from stdin, trimming it from spaces, enforcing
// it to parse into a float. If an empty line is entered, the default value is returned.
@ -207,15 +208,13 @@ func (w *wizard) readDefaultFloat(def float64) float64 {
// readPassword reads a single line from stdin, trimming it from the trailing new
// line and returns it. The input will not be echoed.
func (w *wizard) readPassword() string {
for {
fmt.Printf("> ")
text, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
log.Crit("Failed to read password", "err", err)
}
fmt.Println()
return string(text)
fmt.Printf("> ")
text, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
log.Crit("Failed to read password", "err", err)
}
fmt.Println()
return string(text)
}
// readAddress reads a single line from stdin, trimming it from spaces and converts
@ -279,3 +278,26 @@ func (w *wizard) readJSON() string {
return string(blob)
}
}
// readIPAddress reads a single line from stdin, trimming it from spaces and
// converts it to a network IP address.
func (w *wizard) readIPAddress() net.IP {
for {
// Read the IP address from the user
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.TrimSpace(text); text == "" {
return nil
}
// Make sure it looks ok and return it if so
ip := net.ParseIP(text)
if ip == nil {
log.Error("Invalid IP address, please retry")
continue
}
return ip
}
}

View File

@ -60,6 +60,22 @@ func (w *wizard) deployEthstats() {
fmt.Printf("What should be the secret password for the API? (default = %s)\n", infos.secret)
infos.secret = w.readDefaultString(infos.secret)
}
// Gather any blacklists to ban from reporting
fmt.Println()
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
if w.readDefaultString("y") != "y" {
infos.banned = nil
fmt.Println()
fmt.Println("Which IP addresses should be blacklisted?")
for {
if ip := w.readIPAddress(); ip != nil {
infos.banned = append(infos.banned, ip.String())
continue
}
break
}
}
// Try to deploy the ethstats server on the host
trusted := make([]string, 0, len(w.servers))
for _, client := range w.servers {
@ -67,7 +83,7 @@ func (w *wizard) deployEthstats() {
trusted = append(trusted, client.address)
}
}
if out, err := deployEthstats(client, w.network, infos.port, infos.secret, infos.host, trusted); err != nil {
if out, err := deployEthstats(client, w.network, infos.port, infos.secret, infos.host, trusted, infos.banned); err != nil {
log.Error("Failed to deploy ethstats container", "err", err)
if len(out) > 0 {
fmt.Printf("%s\n", out)

View File

@ -71,22 +71,20 @@ func (w *wizard) makeServer() string {
fmt.Println()
fmt.Println("Please enter remote server's address:")
for {
// Read and dial the server to ensure docker is present
input := w.readString()
// Read and dial the server to ensure docker is present
input := w.readString()
client, err := dial(input, nil)
if err != nil {
log.Error("Server not ready for puppeth", "err", err)
return ""
}
// All checks passed, start tracking the server
w.servers[input] = client
w.conf.Servers[input] = client.pubkey
w.conf.flush()
return input
client, err := dial(input, nil)
if err != nil {
log.Error("Server not ready for puppeth", "err", err)
return ""
}
// All checks passed, start tracking the server
w.servers[input] = client
w.conf.Servers[input] = client.pubkey
w.conf.flush()
return input
}
// selectServer lists all the currently known servers for the user to choose from,

View File

@ -1,38 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/swarm/storage"
"gopkg.in/urfave/cli.v1"
)
func cleandb(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Need path to chunks database as the first and only argument")
}
chunkDbPath := args[0]
hash := storage.MakeHashFunc("SHA3")
dbStore, err := storage.NewDbStore(chunkDbPath, hash, 10000000, 0)
if err != nil {
utils.Fatalf("Cannot initialise dbstore: %v", err)
}
dbStore.Cleanup()
}

116
vendor/github.com/ethereum/go-ethereum/cmd/swarm/db.go generated vendored Normal file
View File

@ -0,0 +1,116 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"io"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/storage"
"gopkg.in/urfave/cli.v1"
)
func dbExport(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 2 {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
}
store, err := openDbStore(args[0])
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
defer store.Close()
var out io.Writer
if args[1] == "-" {
out = os.Stdout
} else {
f, err := os.Create(args[1])
if err != nil {
utils.Fatalf("error opening output file: %s", err)
}
defer f.Close()
out = f
}
count, err := store.Export(out)
if err != nil {
utils.Fatalf("error exporting local chunk database: %s", err)
}
log.Info(fmt.Sprintf("successfully exported %d chunks", count))
}
func dbImport(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 2 {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
}
store, err := openDbStore(args[0])
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
defer store.Close()
var in io.Reader
if args[1] == "-" {
in = os.Stdin
} else {
f, err := os.Open(args[1])
if err != nil {
utils.Fatalf("error opening input file: %s", err)
}
defer f.Close()
in = f
}
count, err := store.Import(in)
if err != nil {
utils.Fatalf("error importing local chunk database: %s", err)
}
log.Info(fmt.Sprintf("successfully imported %d chunks", count))
}
func dbClean(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
}
store, err := openDbStore(args[0])
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
defer store.Close()
store.Cleanup()
}
func openDbStore(path string) (*storage.DbStore, error) {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return nil, fmt.Errorf("invalid chunkdb path: %s", err)
}
hash := storage.MakeHashFunc("SHA3")
return storage.NewDbStore(path, hash, 10000000, 0)
}

View File

@ -25,6 +25,7 @@ import (
"os"
"os/signal"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
@ -240,15 +241,69 @@ Removes a path from the manifest
},
},
{
Action: cleandb,
Name: "db",
Usage: "manage the local chunk database",
ArgsUsage: "db COMMAND",
Description: `
Manage the local chunk database.
`,
Subcommands: []cli.Command{
{
Action: dbExport,
Name: "export",
Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
ArgsUsage: "<chunkdb> <file>",
Description: `
Export a local chunk database as a tar archive (use - to send to stdout).
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
The export may be quite large, consider piping the output through the Unix
pv(1) tool to get a progress bar:
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar
`,
},
{
Action: dbImport,
Name: "import",
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
ArgsUsage: "<chunkdb> <file>",
Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).
swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
The import may be quite large, consider piping the input through the Unix
pv(1) tool to get a progress bar:
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -
`,
},
{
Action: dbClean,
Name: "clean",
Usage: "remove corrupt entries from a local chunk database",
ArgsUsage: "<chunkdb>",
Description: `
Remove corrupt entries from a local chunk database.
`,
},
},
},
{
Action: func(ctx *cli.Context) {
utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
},
Name: "cleandb",
Usage: "Cleans database of corrupted entries",
Usage: "DEPRECATED: use 'swarm db clean'",
ArgsUsage: " ",
Description: `
Cleans database of corrupted entries.
DEPRECATED: use 'swarm db clean'.
`,
},
}
sort.Sort(cli.CommandsByName(app.Commands))
app.Flags = []cli.Flag{
utils.IdentityFlag,

View File

@ -164,7 +164,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
for _, b := range bs {
if !chain.HasBlock(b.Hash()) {
if !chain.HasBlock(b.Hash(), b.NumberU64()) {
return false
}
}

View File

@ -41,7 +41,6 @@ import (
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethstats"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@ -121,7 +120,7 @@ var (
}
NoUSBFlag = cli.BoolFlag{
Name: "nousb",
Usage: "Disables monitoring for and managine USB hardware wallets",
Usage: "Disables monitoring for and managing USB hardware wallets",
}
NetworkIdFlag = cli.Uint64Flag{
Name: "networkid",
@ -213,6 +212,16 @@ var (
Name: "txpool.nolocals",
Usage: "Disables price exemptions for locally submitted transactions",
}
TxPoolJournalFlag = cli.StringFlag{
Name: "txpool.journal",
Usage: "Disk journal for local transaction to survive node restarts",
Value: core.DefaultTxPoolConfig.Journal,
}
TxPoolRejournalFlag = cli.DurationFlag{
Name: "txpool.rejournal",
Usage: "Time interval to regenerate the local transaction journal",
Value: core.DefaultTxPoolConfig.Rejournal,
}
TxPoolPriceLimitFlag = cli.Uint64Flag{
Name: "txpool.pricelimit",
Usage: "Minimum gas price limit to enforce for acceptance into the pool",
@ -838,6 +847,12 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) {
cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name)
}
if ctx.GlobalIsSet(TxPoolJournalFlag.Name) {
cfg.Journal = ctx.GlobalString(TxPoolJournalFlag.Name)
}
if ctx.GlobalIsSet(TxPoolRejournalFlag.Name) {
cfg.Rejournal = ctx.GlobalDuration(TxPoolRejournalFlag.Name)
}
if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) {
cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name)
}
@ -934,10 +949,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
}
// Ethereum needs to know maxPeers to calculate the light server peer ratio.
// TODO(fjl): ensure Ethereum can get MaxPeers from node.
cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name)
if ctx.GlobalIsSet(CacheFlag.Name) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name)
}
@ -1077,14 +1088,17 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
engine := ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
engine = ethash.New("", 1, 0, "", 1, 0)
engine = ethash.New(
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
)
}
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
chain, err = core.NewBlockChain(chainDb, config, engine, new(event.TypeMux), vmcfg)
chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
}

View File

@ -47,6 +47,9 @@ func FromHex(s string) []byte {
//
// Returns an exact copy of the provided bytes
func CopyBytes(b []byte) (copiedBytes []byte) {
if b == nil {
return nil
}
copiedBytes = make([]byte, len(b))
copy(copiedBytes, b)

View File

@ -26,11 +26,10 @@ import (
)
var (
textZero = []byte(`0x0`)
bytesT = reflect.TypeOf(Bytes(nil))
bigT = reflect.TypeOf((*Big)(nil))
uintT = reflect.TypeOf(Uint(0))
uint64T = reflect.TypeOf(Uint64(0))
bytesT = reflect.TypeOf(Bytes(nil))
bigT = reflect.TypeOf((*Big)(nil))
uintT = reflect.TypeOf(Uint(0))
uint64T = reflect.TypeOf(Uint64(0))
)
// Bytes marshals/unmarshals as a JSON string with 0x prefix.

View File

@ -24,6 +24,7 @@ import (
"reflect"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/sha3"
)
const (
@ -87,7 +88,7 @@ func (h Hash) MarshalText() ([]byte, error) {
return hexutil.Bytes(h[:]).MarshalText()
}
// Sets the hash to the value of b. If b is larger than len(h) it will panic
// Sets the hash to the value of b. If b is larger than len(h), 'b' will be cropped (from the left).
func (h *Hash) SetBytes(b []byte) {
if len(b) > len(h) {
b = b[len(b)-HashLength:]
@ -96,7 +97,7 @@ func (h *Hash) SetBytes(b []byte) {
copy(h[HashLength-len(b):], b)
}
// Set string `s` to h. If s is larger than len(h) it will panic
// Set string `s` to h. If s is larger than len(h) s will be cropped (from left) to fit.
func (h *Hash) SetString(s string) { h.SetBytes([]byte(s)) }
// Sets h to other
@ -163,7 +164,28 @@ func (a Address) Str() string { return string(a[:]) }
func (a Address) Bytes() []byte { return a[:] }
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
func (a Address) Hex() string { return hexutil.Encode(a[:]) }
// Hex returns an EIP55-compliant hex string representation of the address.
func (a Address) Hex() string {
unchecksummed := hex.EncodeToString(a[:])
sha := sha3.NewKeccak256()
sha.Write([]byte(unchecksummed))
hash := sha.Sum(nil)
result := []byte(unchecksummed)
for i := 0; i < len(result); i++ {
hashByte := hash[i/2]
if i%2 == 0 {
hashByte = hashByte >> 4
} else {
hashByte &= 0xf
}
if result[i] > '9' && hashByte > 7 {
result[i] -= 32
}
}
return "0x" + string(result)
}
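A concrete check against the first EIP-55 test vector (a fragment, assuming the "fmt" import and this common package): the all-lowercase input should come back mixed-case.
addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
fmt.Println(addr.Hex()) // 0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed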
// String implements the stringer interface and is used also by the logger.
func (a Address) String() string {

View File

@ -229,9 +229,9 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
// Tally up the new vote from the signer
var authorize bool
switch {
case bytes.Compare(header.Nonce[:], nonceAuthVote) == 0:
case bytes.Equal(header.Nonce[:], nonceAuthVote):
authorize = true
case bytes.Compare(header.Nonce[:], nonceDropVote) == 0:
case bytes.Equal(header.Nonce[:], nonceDropVote):
authorize = false
default:
return nil, errInvalidVote

View File

@ -36,8 +36,9 @@ import (
// Ethash proof-of-work protocol constants.
var (
blockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
maxUncles = 2 // Maximum number of uncles allowed in a single block
frontierBlockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
byzantiumBlockReward *big.Int = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
maxUncles = 2 // Maximum number of uncles allowed in a single block
)
// Various error messages to mark blocks invalid. These should be private to
@ -287,8 +288,10 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
// given the parent block's time and difficulty.
// TODO (karalabe): Move the chain maker into this package and make this private!
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, common.Big1)
next := new(big.Int).Add(parent.Number, big1)
switch {
case config.IsByzantium(next):
return calcDifficultyByzantium(time, parent)
case config.IsHomestead(next):
return calcDifficultyHomestead(time, parent)
default:
@ -299,10 +302,73 @@ func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Heade
// Some weird constants to avoid constant memory allocs for them.
var (
expDiffPeriod = big.NewInt(100000)
big1 = big.NewInt(1)
big2 = big.NewInt(2)
big9 = big.NewInt(9)
big10 = big.NewInt(10)
bigMinus99 = big.NewInt(-99)
big2999999 = big.NewInt(2999999)
)
// calcDifficultyByzantium is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Byzantium rules.
func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int {
// https://github.com/ethereum/EIPs/issues/100.
// algorithm:
// diff = (parent_diff +
// (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
// ) + 2^(periodCount - 2)
bigTime := new(big.Int).SetUint64(time)
bigParentTime := new(big.Int).Set(parent.Time)
// holds intermediate values to make the algo easier to read & audit
x := new(big.Int)
y := new(big.Int)
// (2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9
x.Sub(bigTime, bigParentTime)
x.Div(x, big9)
if parent.UncleHash == types.EmptyUncleHash {
x.Sub(big1, x)
} else {
x.Sub(big2, x)
}
// max((2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9, -99)
if x.Cmp(bigMinus99) < 0 {
x.Set(bigMinus99)
}
// parent_diff + (parent_diff // 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
x.Mul(y, x)
x.Add(parent.Difficulty, x)
// minimum difficulty can ever be (before exponential factor)
if x.Cmp(params.MinimumDifficulty) < 0 {
x.Set(params.MinimumDifficulty)
}
// calculate a fake block number for the ice-age delay:
// https://github.com/ethereum/EIPs/pull/669
// fake_block_number = max(0, block.number - 3_000_000)
fakeBlockNumber := new(big.Int)
if parent.Number.Cmp(big2999999) >= 0 {
fakeBlockNumber = fakeBlockNumber.Sub(parent.Number, big2999999) // Note, parent is 1 less than the actual block number
}
// for the exponential factor
periodCount := fakeBlockNumber
periodCount.Div(periodCount, expDiffPeriod)
// the exponential factor, commonly referred to as "the bomb"
// diff = diff + 2^(periodCount - 2)
if periodCount.Cmp(big1) > 0 {
y.Sub(periodCount, big2)
y.Exp(big2, y, nil)
x.Add(x, y)
}
return x
}
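A quick numeric check of the ice-age term: with a parent number of 4,369,999 (so the new block is 4,370,000, the mainnet Byzantium block), fakeBlockNumber = 4,369,999 - 2,999,999 = 1,370,000, periodCount = 1,370,000 / 100,000 = 13, and the bomb term is 2^(13-2) = 2048, so the delay referenced above winds the exponential factor back to a negligible contribution.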
// calcDifficultyHomestead is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Homestead rules.
@ -320,12 +386,12 @@ func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
x := new(big.Int)
y := new(big.Int)
// 1 - (block_timestamp -parent_timestamp) // 10
// 1 - (block_timestamp - parent_timestamp) // 10
x.Sub(bigTime, bigParentTime)
x.Div(x, big10)
x.Sub(common.Big1, x)
x.Sub(big1, x)
// max(1 - (block_timestamp - parent_timestamp) // 10, -99)))
// max(1 - (block_timestamp - parent_timestamp) // 10, -99)
if x.Cmp(bigMinus99) < 0 {
x.Set(bigMinus99)
}
@ -339,14 +405,14 @@ func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
x.Set(params.MinimumDifficulty)
}
// for the exponential factor
periodCount := new(big.Int).Add(parent.Number, common.Big1)
periodCount := new(big.Int).Add(parent.Number, big1)
periodCount.Div(periodCount, expDiffPeriod)
// the exponential factor, commonly referred to as "the bomb"
// diff = diff + 2^(periodCount - 2)
if periodCount.Cmp(common.Big1) > 0 {
y.Sub(periodCount, common.Big2)
y.Exp(common.Big2, y, nil)
if periodCount.Cmp(big1) > 0 {
y.Sub(periodCount, big2)
y.Exp(big2, y, nil)
x.Add(x, y)
}
return x
@ -373,12 +439,12 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
diff.Set(params.MinimumDifficulty)
}
periodCount := new(big.Int).Add(parent.Number, common.Big1)
periodCount := new(big.Int).Add(parent.Number, big1)
periodCount.Div(periodCount, expDiffPeriod)
if periodCount.Cmp(common.Big1) > 0 {
if periodCount.Cmp(big1) > 0 {
// diff = diff + 2^(periodCount - 2)
expDiff := periodCount.Sub(periodCount, common.Big2)
expDiff.Exp(common.Big2, expDiff, nil)
expDiff := periodCount.Sub(periodCount, big2)
expDiff.Exp(big2, expDiff, nil)
diff.Add(diff, expDiff)
diff = math.BigMax(diff, params.MinimumDifficulty)
}
@ -444,7 +510,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
// setting the final state and assembling the block.
func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
// Accumulate any block and uncle rewards and commit the final state root
AccumulateRewards(state, header, uncles)
AccumulateRewards(chain.Config(), state, header, uncles)
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
// Header seems complete, assemble into a block and return
@ -461,7 +527,13 @@ var (
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
// TODO (karalabe): Move the chain maker into this package and make this private!
func AccumulateRewards(state *state.StateDB, header *types.Header, uncles []*types.Header) {
func AccumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
// Select the correct block reward based on chain progression
blockReward := frontierBlockReward
if config.IsByzantium(header.Number) {
blockReward = byzantiumBlockReward
}
// Accumulate the rewards for the miner and any included uncles
reward := new(big.Int).Set(blockReward)
r := new(big.Int)
for _, uncle := range uncles {

View File

@ -23,6 +23,7 @@ import (
"strings"
"time"
"github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/robertkrimen/otto"
@ -83,6 +84,49 @@ func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
return ret
}
// OpenWallet is a wrapper around personal.openWallet which can interpret and
// react to certain error messages, such as the Trezor PIN matrix request.
func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
// Make sure we have a wallet specified to open
if !call.Argument(0).IsString() {
throwJSException("first argument must be the wallet URL to open")
}
wallet := call.Argument(0)
var passwd otto.Value
if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
passwd, _ = otto.ToValue("")
} else {
passwd = call.Argument(1)
}
// Open the wallet and return if successful in itself
val, err := call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
if err == nil {
return val
}
// Wallet open failed, report error unless it's a PIN entry
if !strings.HasSuffix(err.Error(), usbwallet.ErrTrezorPINNeeded.Error()) {
throwJSException(err.Error())
}
// Trezor PIN matrix input requested, display the matrix to the user and fetch the data
fmt.Fprintf(b.printer, "Look at the device for number positions\n\n")
fmt.Fprintf(b.printer, "7 | 8 | 9\n")
fmt.Fprintf(b.printer, "--+---+--\n")
fmt.Fprintf(b.printer, "4 | 5 | 6\n")
fmt.Fprintf(b.printer, "--+---+--\n")
fmt.Fprintf(b.printer, "1 | 2 | 3\n\n")
if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
throwJSException(err.Error())
} else {
passwd, _ = otto.ToValue(input)
}
if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
throwJSException(err.Error())
}
return val
}
// UnlockAccount is a wrapper around the personal.unlockAccount RPC method that
// uses a non-echoing password prompt to acquire the passphrase and executes the
// original RPC method (saved in jeth.unlockAccount) with it to actually execute

View File

@ -160,10 +160,15 @@ func (c *Console) init(preload []string) error {
if err != nil {
return err
}
// Override the unlockAccount, newAccount and sign methods since these require user interaction.
// Assign these method in the Console the original web3 callbacks. These will be called by the jeth.*
// methods after they got the password from the user and send the original web3 request to the backend.
// Override the openWallet, unlockAccount, newAccount and sign methods since
// these require user interaction. Assign the original web3 callbacks to
// these methods in the Console; the jeth.* methods will call them after
// obtaining the password from the user, sending the original web3 request
// to the backend.
if obj := personal.Object(); obj != nil { // make sure the personal api is enabled over the interface
if _, err = c.jsre.Run(`jeth.openWallet = personal.openWallet;`); err != nil {
return fmt.Errorf("personal.openWallet: %v", err)
}
if _, err = c.jsre.Run(`jeth.unlockAccount = personal.unlockAccount;`); err != nil {
return fmt.Errorf("personal.unlockAccount: %v", err)
}
@ -173,6 +178,7 @@ func (c *Console) init(preload []string) error {
if _, err = c.jsre.Run(`jeth.sign = personal.sign;`); err != nil {
return fmt.Errorf("personal.sign: %v", err)
}
obj.Set("openWallet", bridge.OpenWallet)
obj.Set("unlockAccount", bridge.UnlockAccount)
obj.Set("newAccount", bridge.NewAccount)
obj.Set("sign", bridge.Sign)

View File

@ -376,12 +376,12 @@ func (self *Chequebook) autoDeposit(interval time.Duration) {
ticker := time.NewTicker(interval)
self.quit = make(chan bool)
quit := self.quit
go func() {
FOR:
for {
select {
case <-quit:
break FOR
return
case <-ticker.C:
self.lock.Lock()
if self.balance.Cmp(self.buffer) < 0 {
@ -395,7 +395,6 @@ func (self *Chequebook) autoDeposit(interval time.Duration) {
}
}
}()
return
}
// Outbox can issue cheques from a single contract to a single beneficiary.
@ -436,7 +435,6 @@ type Inbox struct {
sender common.Address // local peer's address to send cashing tx from
signer *ecdsa.PublicKey // peer's public key
txhash string // tx hash of last cashing tx
abigen bind.ContractBackend // blockchain API
session *contract.ChequebookSession // abi contract backend with tx opts
quit chan bool // when closed causes autocash to stop
maxUncashed *big.Int // threshold that triggers autocashing
@ -525,12 +523,12 @@ func (self *Inbox) autoCash(cashInterval time.Duration) {
ticker := time.NewTicker(cashInterval)
self.quit = make(chan bool)
quit := self.quit
go func() {
FOR:
for {
select {
case <-quit:
break FOR
return
case <-ticker.C:
self.lock.Lock()
if self.cheque != nil && self.cheque.Amount.Cmp(self.cashed) != 0 {
@ -543,7 +541,6 @@ func (self *Inbox) autoCash(cashInterval time.Duration) {
}
}
}()
return
}
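Both loops now return from the goroutine directly instead of using a labeled break, which also makes the trailing return of the enclosing method redundant. A minimal sketch of the resulting ticker-until-quit idiom (generic names, not from the diff):
go func() {
    for {
        select {
        case <-quit:
            return // exits the goroutine; no labeled break required
        case <-ticker.C:
            // periodic work goes here
        }
    }
}()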
// Receive is called to deposit the latest cheque to the incoming Inbox.

View File

@ -145,7 +145,7 @@ func (l *lexer) ignore() {
// Accepts checks whether the given input matches the next rune
func (l *lexer) accept(valid string) bool {
if strings.IndexRune(valid, l.next()) >= 0 {
if strings.ContainsRune(valid, l.next()) {
return true
}
@ -157,7 +157,7 @@ func (l *lexer) accept(valid string) bool {
// acceptRun will continue to advance the seeker until valid
// can no longer be met.
func (l *lexer) acceptRun(valid string) {
for strings.IndexRune(valid, l.next()) >= 0 {
for strings.ContainsRune(valid, l.next()) {
}
l.backup()
}
@ -166,7 +166,7 @@ func (l *lexer) acceptRun(valid string) {
// to advance the seeker until the rune has been found.
func (l *lexer) acceptRunUntil(until rune) bool {
// Continues running until a rune is found
for i := l.next(); strings.IndexRune(string(until), i) == -1; i = l.next() {
for i := l.next(); !strings.ContainsRune(string(until), i); i = l.next() {
if i == 0 {
return false
}
@ -254,7 +254,7 @@ func lexInsideString(l *lexer) stateFn {
func lexNumber(l *lexer) stateFn {
acceptance := Numbers
if l.accept("0") && l.accept("xX") {
if l.accept("0") || l.accept("xX") {
acceptance = HexadecimalNumbers
}
l.acceptRun(acceptance)

View File

@ -23,7 +23,6 @@ import (
"io"
"math/big"
mrand "math/rand"
"runtime"
"sync"
"sync/atomic"
"time"
@ -79,10 +78,15 @@ const (
type BlockChain struct {
config *params.ChainConfig // chain & network configuration
hc *HeaderChain
chainDb ethdb.Database
eventMux *event.TypeMux
genesisBlock *types.Block
hc *HeaderChain
chainDb ethdb.Database
rmLogsFeed event.Feed
chainFeed event.Feed
chainSideFeed event.Feed
chainHeadFeed event.Feed
logsFeed event.Feed
scope event.SubscriptionScope
genesisBlock *types.Block
mu sync.RWMutex // global mutex for locking chain operations
chainmu sync.RWMutex // blockchain insertion lock
@ -115,7 +119,7 @@ type BlockChain struct {
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, mux *event.TypeMux, vmConfig vm.Config) (*BlockChain, error) {
func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
blockCache, _ := lru.New(blockCacheLimit)
@ -126,7 +130,6 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, engine co
config: config,
chainDb: chainDb,
stateCache: state.NewDatabase(chainDb),
eventMux: mux,
quit: make(chan struct{}),
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
@ -511,10 +514,13 @@ func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
return body
}
// HasBlock checks if a block is fully present in the database or not, caching
// it if present.
func (bc *BlockChain) HasBlock(hash common.Hash) bool {
return bc.GetBlockByHash(hash) != nil
// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
if bc.blockCache.Contains(hash) {
return true
}
ok, _ := bc.chainDb.Has(blockBodyKey(hash, number))
return ok
}
// HasBlockAndState checks if a block and associated state trie is fully present
@ -594,6 +600,8 @@ func (bc *BlockChain) Stop() {
if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
return
}
// Unsubscribe all subscriptions registered from blockchain
bc.scope.Close()
close(bc.quit)
atomic.StoreInt32(&bc.procInterrupt, 1)
@ -687,120 +695,73 @@ func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts ty
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
// XXX should this be moved to the test?
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
bc.wg.Add(1)
defer bc.wg.Done()
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(blockChain); i++ {
if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
// Chain broke ancestry, log a message (programming error) and skip insertion
log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
}
}
// Pre-checks passed, start the block body and receipt imports
bc.wg.Add(1)
defer bc.wg.Done()
// Collect some import statistics to report on
stats := struct{ processed, ignored int32 }{}
start := time.Now()
var (
stats = struct{ processed, ignored int32 }{}
start = time.Now()
bytes = 0
batch = bc.chainDb.NewBatch()
)
for i, block := range blockChain {
receipts := receiptChain[i]
// Short circuit insertion if shutting down or processing failed
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
return 0, nil
}
// Short circuit if the owner header is unknown
if !bc.HasHeader(block.Hash(), block.NumberU64()) {
return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
}
// Skip if the entire data is already known
if bc.HasBlock(block.Hash(), block.NumberU64()) {
stats.ignored++
continue
}
// Compute all the non-consensus fields of the receipts
SetReceiptsData(bc.config, block, receipts)
// Write all the data out into the database
if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return i, fmt.Errorf("failed to write block body: %v", err)
}
if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
return i, fmt.Errorf("failed to write block receipts: %v", err)
}
if err := WriteTxLookupEntries(batch, block); err != nil {
return i, fmt.Errorf("failed to write lookup metadata: %v", err)
}
stats.processed++
// Create the block importing task queue and worker functions
tasks := make(chan int, len(blockChain))
for i := 0; i < len(blockChain) && i < len(receiptChain); i++ {
tasks <- i
}
close(tasks)
errs, failed := make([]error, len(tasks)), int32(0)
process := func(worker int) {
for index := range tasks {
block, receipts := blockChain[index], receiptChain[index]
// Short circuit insertion if shutting down or processing failed
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
return
if batch.ValueSize() >= ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return 0, err
}
if atomic.LoadInt32(&failed) > 0 {
return
}
// Short circuit if the owner header is unknown
if !bc.HasHeader(block.Hash()) {
errs[index] = fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
atomic.AddInt32(&failed, 1)
return
}
// Skip if the entire data is already known
if bc.HasBlock(block.Hash()) {
atomic.AddInt32(&stats.ignored, 1)
continue
}
// Compute all the non-consensus fields of the receipts
SetReceiptsData(bc.config, block, receipts)
// Write all the data out into the database
if err := WriteBody(bc.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
errs[index] = fmt.Errorf("failed to write block body: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write block body", "err", err)
return
}
if err := WriteBlockReceipts(bc.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write block receipts", "err", err)
return
}
if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write log blooms", "err", err)
return
}
if err := WriteTransactions(bc.chainDb, block); err != nil {
errs[index] = fmt.Errorf("failed to write individual transactions: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write individual transactions", "err", err)
return
}
if err := WriteReceipts(bc.chainDb, receipts); err != nil {
errs[index] = fmt.Errorf("failed to write individual receipts: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write individual receipts", "err", err)
return
}
atomic.AddInt32(&stats.processed, 1)
bytes += batch.ValueSize()
batch = bc.chainDb.NewBatch()
}
}
// Start as many worker threads as goroutines allowed
pending := new(sync.WaitGroup)
for i := 0; i < runtime.GOMAXPROCS(0); i++ {
pending.Add(1)
go func(id int) {
defer pending.Done()
process(id)
}(i)
}
pending.Wait()
// If anything failed, report
if failed > 0 {
for i, err := range errs {
if err != nil {
return i, err
}
if batch.ValueSize() > 0 {
bytes += batch.ValueSize()
if err := batch.Write(); err != nil {
return 0, err
}
}
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during receipts processing")
return 0, nil
}
// Update the head fast sync block if better
bc.mu.Lock()
head := blockChain[len(errs)-1]
head := blockChain[len(blockChain)-1]
if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
if bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64()).Cmp(td) < 0 {
if err := WriteHeadFastBlockHash(bc.chainDb, head.Hash()); err != nil {
@ -811,16 +772,18 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
bc.mu.Unlock()
// Report some public statistics so the user has a clue what's going on
last := blockChain[len(blockChain)-1]
log.Info("Imported new block receipts", "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
"number", last.Number(), "hash", last.Hash(), "ignored", stats.ignored)
log.Info("Imported new block receipts",
"count", stats.processed,
"elapsed", common.PrettyDuration(time.Since(start)),
"bytes", bytes,
"number", head.Number(),
"hash", head.Hash(),
"ignored", stats.ignored)
return 0, nil
}
// WriteBlock writes the block to the chain.
func (bc *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err error) {
func (bc *BlockChain) WriteBlockAndState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
bc.wg.Add(1)
defer bc.wg.Done()
@ -833,15 +796,28 @@ func (bc *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err er
bc.mu.Lock()
defer bc.mu.Unlock()
if bc.HasBlock(block.Hash(), block.NumberU64()) {
log.Trace("Block existed", "hash", block.Hash())
return
}
localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
externTd := new(big.Int).Add(block.Difficulty(), ptd)
// Irrelevant of the canonical status, write the block itself to the database
if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
log.Crit("Failed to write block total difficulty", "err", err)
return NonStatTy, err
}
if err := WriteBlock(bc.chainDb, block); err != nil {
log.Crit("Failed to write block contents", "err", err)
// Write other block data using a batch.
batch := bc.chainDb.NewBatch()
if err := WriteBlock(batch, block); err != nil {
return NonStatTy, err
}
if _, err := state.CommitTo(batch, bc.config.IsEIP158(block.Number())); err != nil {
return NonStatTy, err
}
if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil {
return NonStatTy, err
}
// If the total difficulty is higher than our known, add it to the canonical chain
@ -854,20 +830,46 @@ func (bc *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err er
return NonStatTy, err
}
}
bc.insert(block) // Insert the block as the new head of the chain
// Write the positional metadata for transaction and receipt lookups
if err := WriteTxLookupEntries(batch, block); err != nil {
return NonStatTy, err
}
// Write hash preimages
if err := WritePreimages(bc.chainDb, block.NumberU64(), state.Preimages()); err != nil {
return NonStatTy, err
}
status = CanonStatTy
} else {
status = SideStatTy
}
if err := batch.Write(); err != nil {
return NonStatTy, err
}
// Set new head.
if status == CanonStatTy {
bc.insert(block)
}
bc.futureBlocks.Remove(block.Hash())
return
return status, nil
}
// InsertChain will attempt to insert the given chain into the canonical chain or, otherwise, create a fork. If an error is returned
// it will return the index number of the failing block as well as an error describing what went wrong (for possible errors see core/errors.go).
// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well as an error describing what
// went wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
n, events, logs, err := bc.insertChain(chain)
bc.PostChainEvents(events, logs)
return n, err
}
// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ {
if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
@ -875,7 +877,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
}
}
@ -892,6 +894,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
var (
stats = insertStats{startTime: mclock.Now()}
events = make([]interface{}, 0, len(chain))
lastCanon *types.Block
coalescedLogs []*types.Log
)
// Start the parallel header verifier
@ -915,7 +918,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
return i, ErrBlacklistedHash
return i, events, coalescedLogs, ErrBlacklistedHash
}
// Wait for the block's verification to complete
bstart := time.Now()
@ -936,7 +939,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// if given.
max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
if block.Time().Cmp(max) > 0 {
return i, fmt.Errorf("future block: %v > %v", block.Time(), max)
return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
}
bc.futureBlocks.Add(block.Hash(), block)
stats.queued++
@ -950,7 +953,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
}
bc.reportBlock(block, nil, err)
return i, err
return i, events, coalescedLogs, err
}
// Create a new statedb using the parent block and report an
// error if it fails.
@ -962,62 +965,35 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
}
state, err := state.New(parent.Root(), bc.stateCache)
if err != nil {
return i, err
return i, events, coalescedLogs, err
}
// Process block using the parent state as reference point.
receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
if err != nil {
bc.reportBlock(block, receipts, err)
return i, err
return i, events, coalescedLogs, err
}
// Validate the state using the default validator
err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
if err != nil {
bc.reportBlock(block, receipts, err)
return i, err
return i, events, coalescedLogs, err
}
// Write state changes to database
if _, err = state.CommitTo(bc.chainDb, bc.config.IsEIP158(block.Number())); err != nil {
return i, err
}
// coalesce logs for later processing
coalescedLogs = append(coalescedLogs, logs...)
if err = WriteBlockReceipts(bc.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
return i, err
}
// write the block to the chain and get the status
status, err := bc.WriteBlock(block)
// Write the block to the chain and get the status.
status, err := bc.WriteBlockAndState(block, receipts, state)
if err != nil {
return i, err
return i, events, coalescedLogs, err
}
switch status {
case CanonStatTy:
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
coalescedLogs = append(coalescedLogs, logs...)
blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
// This puts transactions in an extra db for rpc
if err := WriteTransactions(bc.chainDb, block); err != nil {
return i, err
}
// store the receipts
if err := WriteReceipts(bc.chainDb, receipts); err != nil {
return i, err
}
// Write mipmap bloom filters
if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
return i, err
}
// Write hash preimages
if err := WritePreimages(bc.chainDb, block.NumberU64(), state.Preimages()); err != nil {
return i, err
}
case SideStatTy:
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
@ -1029,9 +1005,11 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
stats.usedGas += usedGas.Uint64()
stats.report(chain, i)
}
go bc.postChainEvents(events, coalescedLogs)
return 0, nil
// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.LastBlockHash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
return 0, events, coalescedLogs, nil
}
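A minimal caller sketch (hypothetical bc and blocks variables): events and logs accumulated during insertion are posted only after the whole batch has been processed.
if n, err := bc.InsertChain(blocks); err != nil {
    log.Error("Block import failed", "index", n, "err", err)
}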
// insertStats tracks and reports on block insertion.
@ -1167,17 +1145,8 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
for _, block := range newChain {
// insert the block in the canonical way, re-writing history
bc.insert(block)
// write canonical receipts and transactions
if err := WriteTransactions(bc.chainDb, block); err != nil {
return err
}
receipts := GetBlockReceipts(bc.chainDb, block.Hash(), block.NumberU64())
// write receipts
if err := WriteReceipts(bc.chainDb, receipts); err != nil {
return err
}
// Write mipmap bloom filters
if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
// write lookup entries for hash based transaction/receipt searches
if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
return err
}
addedTxs = append(addedTxs, block.Transactions()...)
@ -1188,22 +1157,15 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// When transactions get deleted from the database that means the
// receipts that were created in the fork must also be deleted
for _, tx := range diff {
DeleteReceipt(bc.chainDb, tx.Hash())
DeleteTransaction(bc.chainDb, tx.Hash())
}
// Must be posted in a goroutine because of the transaction pool trying
// to acquire the chain manager lock
if len(diff) > 0 {
go bc.eventMux.Post(RemovedTransactionEvent{diff})
DeleteTxLookupEntry(bc.chainDb, tx.Hash())
}
if len(deletedLogs) > 0 {
go bc.eventMux.Post(RemovedLogsEvent{deletedLogs})
go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
}
if len(oldChain) > 0 {
go func() {
for _, block := range oldChain {
bc.eventMux.Post(ChainSideEvent{Block: block})
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
}()
}
@ -1211,22 +1173,25 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
return nil
}
// postChainEvents iterates over the events generated by a chain insertion and
// posts them into the event mux.
func (bc *BlockChain) postChainEvents(events []interface{}, logs []*types.Log) {
// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
// post event logs for further processing
bc.eventMux.Post(logs)
if logs != nil {
bc.logsFeed.Send(logs)
}
for _, event := range events {
if event, ok := event.(ChainEvent); ok {
// We need some control over the mining operation. Acquiring locks and waiting
// for the miner to create new block takes too long and in most cases isn't
// even necessary.
if bc.LastBlockHash() == event.Hash {
bc.eventMux.Post(ChainHeadEvent{event.Block})
}
switch ev := event.(type) {
case ChainEvent:
bc.chainFeed.Send(ev)
case ChainHeadEvent:
bc.chainHeadFeed.Send(ev)
case ChainSideEvent:
bc.chainSideFeed.Send(ev)
}
// Fire the insertion events individually too
bc.eventMux.Post(event)
}
}
@ -1373,8 +1338,8 @@ func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash) bool {
return bc.hc.HasHeader(hash)
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
return bc.hc.HasHeader(hash, number)
}
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
@ -1394,3 +1359,28 @@ func (bc *BlockChain) Config() *params.ChainConfig { return bc.config }
// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}
// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}
// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}
// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
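These typed feeds replace the old event mux subscriptions. A minimal consumer sketch (hypothetical caller) that follows the canonical head:
func followHead(bc *BlockChain) error {
    heads := make(chan ChainHeadEvent, 16)
    sub := bc.SubscribeChainHeadEvent(heads)
    defer sub.Unsubscribe()
    for {
        select {
        case ev := <-heads:
            log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
        case err := <-sub.Err():
            // the feed was torn down, e.g. by bc.scope.Close() during Stop
            return err
        }
    }
}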

View File

@ -0,0 +1,18 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bloombits implements bloom filtering on batches of data.
package bloombits

View File

@ -0,0 +1,87 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"errors"
"github.com/ethereum/go-ethereum/core/types"
)
// errSectionOutOfBounds is returned if the user tried to add more bloom filters
// to the batch than available space, or if one tries to retrieve above the capacity.
var errSectionOutOfBounds = errors.New("section out of bounds")
// Generator takes a number of bloom filters and generates the rotated bloom bits
// to be used for batched filtering.
type Generator struct {
blooms [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
sections uint // Number of sections to batch together
nextBit uint // Next bit to set when adding a bloom
}
// NewGenerator creates a rotated bloom generator that can iteratively fill a
// batched bloom filter's bits.
func NewGenerator(sections uint) (*Generator, error) {
if sections%8 != 0 {
return nil, errors.New("section count not multiple of 8")
}
b := &Generator{sections: sections}
for i := 0; i < types.BloomBitLength; i++ {
b.blooms[i] = make([]byte, sections/8)
}
return b, nil
}
// AddBloom takes a single bloom filter and sets the corresponding bit column
// in memory accordingly.
func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
// Make sure we're not adding more bloom filters than our capacity
if b.nextBit >= b.sections {
return errSectionOutOfBounds
}
if b.nextBit != index {
return errors.New("bloom filter with unexpected index")
}
// Rotate the bloom and insert into our collection
byteIndex := b.nextBit / 8
bitMask := byte(1) << byte(7-b.nextBit%8)
for i := 0; i < types.BloomBitLength; i++ {
bloomByteIndex := types.BloomByteLength - 1 - i/8
bloomBitMask := byte(1) << byte(i%8)
if (bloom[bloomByteIndex] & bloomBitMask) != 0 {
b.blooms[i][byteIndex] |= bitMask
}
}
b.nextBit++
return nil
}
// Bitset returns the bit vector belonging to the given bit index after all
// blooms have been added.
func (b *Generator) Bitset(idx uint) ([]byte, error) {
if b.nextBit != b.sections {
return nil, errors.New("bloom not fully generated yet")
}
if idx >= b.sections {
return nil, errSectionOutOfBounds
}
return b.blooms[idx], nil
}
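A minimal usage sketch (illustrative, assuming a 4096-block section): feed one bloom per block in order, then read out the per-bit vectors.
gen, err := NewGenerator(4096)
if err != nil {
    panic(err)
}
for i := uint(0); i < 4096; i++ {
    // a real caller passes each header's logs bloom here
    if err := gen.AddBloom(i, types.Bloom{}); err != nil {
        panic(err)
    }
}
// 4096 blooms yield 2048 vectors of 4096/8 = 512 bytes each; bit 7 of
// byte 0 in vector k is set iff bloom 0 had bloom-filter bit k set.
vector, _ := gen.Bitset(0)
_ = vector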

View File

@ -0,0 +1,615 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"bytes"
"errors"
"math"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/crypto"
)
// bloomIndexes represents the bit indexes inside the bloom filter that belong
// to some key.
type bloomIndexes [3]uint
// calcBloomIndexes returns the bloom filter bit indexes belonging to the given key.
func calcBloomIndexes(b []byte) bloomIndexes {
b = crypto.Keccak256(b)
var idxs bloomIndexes
for i := 0; i < len(idxs); i++ {
idxs[i] = (uint(b[2*i])<<8)&2047 + uint(b[2*i+1])
}
return idxs
}
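A worked example of the index math (illustrative hash bytes): if the first two bytes of the Keccak-256 hash are 0x12 and 0x34, the first index is (0x12<<8)&2047 + 0x34 = 512 + 52 = 564. Each pair of hash bytes thus contributes eleven bits selecting one of the 2048 bloom filter bit positions, and the next two byte pairs yield the remaining two indexes.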
// partialMatches with a non-nil vector represents a section in which some sub-
// matchers have already found potential matches. Subsequent sub-matchers will
// binary AND their matches with this vector. If vector is nil, it represents a
// section to be processed by the first sub-matcher.
type partialMatches struct {
section uint64
bitset []byte
}
// Retrieval represents a request for retrieval task assignments for a given
// bit with the given number of fetch elements, or a response for such a request.
// It can also have the actual results set to be used as a delivery data struct.
type Retrieval struct {
Bit uint
Sections []uint64
Bitsets [][]byte
}
// Matcher is a pipelined system of schedulers and logic matchers which perform
// binary AND/OR operations on the bit-streams, creating a stream of potential
// blocks to inspect for data content.
type Matcher struct {
sectionSize uint64 // Size of the data batches to filter on
filters [][]bloomIndexes // Filter the system is matching for
schedulers map[uint]*scheduler // Retrieval schedulers for loading bloom bits
retrievers chan chan uint // Retriever processes waiting for bit allocations
counters chan chan uint // Retriever processes waiting for task count reports
retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
deliveries chan *Retrieval // Retriever processes waiting for task response deliveries
running uint32 // Atomic flag whether a session is live or not
}
// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
// address and topic filtering on them.
func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
// Create the matcher instance
m := &Matcher{
sectionSize: sectionSize,
schedulers: make(map[uint]*scheduler),
retrievers: make(chan chan uint),
counters: make(chan chan uint),
retrievals: make(chan chan *Retrieval),
deliveries: make(chan *Retrieval),
}
// Calculate the bloom bit indexes for the groups we're interested in
m.filters = nil
for _, filter := range filters {
bloomBits := make([]bloomIndexes, len(filter))
for i, clause := range filter {
bloomBits[i] = calcBloomIndexes(clause)
}
m.filters = append(m.filters, bloomBits)
}
// For every bit, create a scheduler to load/download the bit vectors
for _, bloomIndexLists := range m.filters {
for _, bloomIndexList := range bloomIndexLists {
for _, bloomIndex := range bloomIndexList {
m.addScheduler(bloomIndex)
}
}
}
return m
}
// addScheduler adds a bit stream retrieval scheduler for the given bit index if
// one does not exist yet. If the bit is already selected for filtering, the
// existing scheduler can be used.
func (m *Matcher) addScheduler(idx uint) {
if _, ok := m.schedulers[idx]; ok {
return
}
m.schedulers[idx] = newScheduler(idx)
}
// Start starts the matching process and returns a stream of bloom matches in
// a given range of blocks. If there are no more matches in the range, the result
// channel is closed.
func (m *Matcher) Start(begin, end uint64, results chan uint64) (*MatcherSession, error) {
// Make sure we're not creating concurrent sessions
if atomic.SwapUint32(&m.running, 1) == 1 {
return nil, errors.New("matcher already running")
}
defer atomic.StoreUint32(&m.running, 0)
// Initiate a new matching round
session := &MatcherSession{
matcher: m,
quit: make(chan struct{}),
kill: make(chan struct{}),
}
for _, scheduler := range m.schedulers {
scheduler.reset()
}
sink := m.run(begin, end, cap(results), session)
// Read the output from the result sink and deliver to the user
session.pend.Add(1)
go func() {
defer session.pend.Done()
defer close(results)
for {
select {
case <-session.quit:
return
case res, ok := <-sink:
// New match result found
if !ok {
return
}
// Calculate the first and last blocks of the section
sectionStart := res.section * m.sectionSize
first := sectionStart
if begin > first {
first = begin
}
last := sectionStart + m.sectionSize - 1
if end < last {
last = end
}
// Iterate over all the blocks in the section and return the matching ones
for i := first; i <= last; i++ {
// Skip the entire byte if no matches are found inside
next := res.bitset[(i-sectionStart)/8]
if next == 0 {
i += 7
continue
}
// Some bit is set, do the actual submatching
if bit := 7 - i%8; next&(1<<bit) != 0 {
select {
case <-session.quit:
return
case results <- i:
}
}
}
}
}
}()
return session, nil
}
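A minimal end-to-end sketch (hypothetical filter values, assuming 4096-block sections): clauses within a group are OR-ed while groups are AND-ed, mirroring the address/topics semantics of log filters.
var (
    addr  common.Address // hypothetical address to filter for
    topic common.Hash    // hypothetical topic to filter for
)
filters := [][][]byte{
    {addr.Bytes()},  // address group: clauses within a group are OR-ed
    {topic.Bytes()}, // topic group: groups are AND-ed together
}
m := NewMatcher(4096, filters)
results := make(chan uint64, 64)
session, err := m.Start(0, 99999, results)
if err != nil {
    panic(err)
}
// a retriever must service the session's bit fetches in parallel,
// e.g. via session.Multiplex (see below)
for number := range results {
    _ = number // a potential match; callers re-check the block's real logs
}
session.Close(time.Second)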
// run creates a daisy-chain of sub-matchers, one for the address set and one
// for each topic set, each sub-matcher receiving a section only if the previous
// ones have all found a potential match in one of the blocks of the section,
// then binary AND-ing its own matches and forwarding the result to the next one.
//
// The method starts feeding the section indexes into the first sub-matcher on a
// new goroutine and returns a sink channel receiving the results.
func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) chan *partialMatches {
// Create the source channel and feed section indexes into
source := make(chan *partialMatches, buffer)
session.pend.Add(1)
go func() {
defer session.pend.Done()
defer close(source)
for i := begin / m.sectionSize; i <= end/m.sectionSize; i++ {
select {
case <-session.quit:
return
case source <- &partialMatches{i, bytes.Repeat([]byte{0xff}, int(m.sectionSize/8))}:
}
}
}()
// Assemble the daisy-chained filtering pipeline
next := source
dist := make(chan *request, buffer)
for _, bloom := range m.filters {
next = m.subMatch(next, dist, bloom, session)
}
// Start the request distribution
session.pend.Add(1)
go m.distributor(dist, session)
return next
}
// subMatch creates a sub-matcher that filters for a set of addresses or topics, binary OR-s those matches, then
// binary AND-s the result to the daisy-chain input (source) and forwards it to the daisy-chain output.
// The matches of each address/topic are calculated by fetching the given sections of the three bloom bit indexes belonging to
// that address/topic, and binary AND-ing those vectors together.
func (m *Matcher) subMatch(source chan *partialMatches, dist chan *request, bloom []bloomIndexes, session *MatcherSession) chan *partialMatches {
// Start the concurrent schedulers for each bit required by the bloom filter
sectionSources := make([][3]chan uint64, len(bloom))
sectionSinks := make([][3]chan []byte, len(bloom))
for i, bits := range bloom {
for j, bit := range bits {
sectionSources[i][j] = make(chan uint64, cap(source))
sectionSinks[i][j] = make(chan []byte, cap(source))
m.schedulers[bit].run(sectionSources[i][j], dist, sectionSinks[i][j], session.quit, &session.pend)
}
}
process := make(chan *partialMatches, cap(source)) // entries from source are forwarded here after fetches have been initiated
results := make(chan *partialMatches, cap(source))
session.pend.Add(2)
go func() {
// Tear down the goroutine and terminate all source channels
defer session.pend.Done()
defer close(process)
defer func() {
for _, bloomSources := range sectionSources {
for _, bitSource := range bloomSources {
close(bitSource)
}
}
}()
// Read sections from the source channel and multiplex into all bit-schedulers
for {
select {
case <-session.quit:
return
case subres, ok := <-source:
// New subresult from previous link
if !ok {
return
}
// Multiplex the section index to all bit-schedulers
for _, bloomSources := range sectionSources {
for _, bitSource := range bloomSources {
select {
case <-session.quit:
return
case bitSource <- subres.section:
}
}
}
// Notify the processor that this section will become available
select {
case <-session.quit:
return
case process <- subres:
}
}
}
}()
go func() {
// Tear down the goroutine and terminate the final sink channel
defer session.pend.Done()
defer close(results)
// Read the source notifications and collect the delivered results
for {
select {
case <-session.quit:
return
case subres, ok := <-process:
// Notified of a section being retrieved
if !ok {
return
}
// Gather all the sub-results and merge them together
var orVector []byte
for _, bloomSinks := range sectionSinks {
var andVector []byte
for _, bitSink := range bloomSinks {
var data []byte
select {
case <-session.quit:
return
case data = <-bitSink:
}
if andVector == nil {
andVector = make([]byte, int(m.sectionSize/8))
copy(andVector, data)
} else {
bitutil.ANDBytes(andVector, andVector, data)
}
}
if orVector == nil {
orVector = andVector
} else {
bitutil.ORBytes(orVector, orVector, andVector)
}
}
if orVector == nil {
orVector = make([]byte, int(m.sectionSize/8))
}
if subres.bitset != nil {
bitutil.ANDBytes(orVector, orVector, subres.bitset)
}
if bitutil.TestBytes(orVector) {
select {
case <-session.quit:
return
case results <- &partialMatches{subres.section, orVector}:
}
}
}
}
}()
return results
}
// distributor receives requests from the schedulers and queues them into a set
// of pending requests, which are assigned to retrievers wanting to fulfil them.
func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
defer session.pend.Done()
var (
requests = make(map[uint][]uint64) // Per-bit list of section requests, ordered by section number
unallocs = make(map[uint]struct{}) // Bits with pending requests but not allocated to any retriever
retrievers chan chan uint // Waiting retrievers (toggled to nil if unallocs is empty)
)
var (
allocs int // Number of active allocations to handle graceful shutdown requests
shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests
)
// assign is a helper method to try to assign a pending bit to an actively
// listening servicer, or schedule it up for later when one arrives.
assign := func(bit uint) {
select {
case fetcher := <-m.retrievers:
allocs++
fetcher <- bit
default:
// No retrievers active, start listening for new ones
retrievers = m.retrievers
unallocs[bit] = struct{}{}
}
}
for {
select {
case <-shutdown:
// Graceful shutdown requested, wait until all pending requests are honoured
if allocs == 0 {
return
}
shutdown = nil
case <-session.kill:
// Pending requests not honoured in time, hard terminate
return
case req := <-dist:
// New retrieval request arrived to be distributed to some fetcher process
queue := requests[req.bit]
index := sort.Search(len(queue), func(i int) bool { return queue[i] >= req.section })
requests[req.bit] = append(queue[:index], append([]uint64{req.section}, queue[index:]...)...)
// If it's a new bit and we have waiting fetchers, allocate to them
if len(queue) == 0 {
assign(req.bit)
}
case fetcher := <-retrievers:
// New retriever arrived, find the bit with the lowest pending section to assign
bit, best := uint(0), uint64(math.MaxUint64)
for idx := range unallocs {
if requests[idx][0] < best {
bit, best = idx, requests[idx][0]
}
}
// Stop tracking this bit (and alloc notifications if no more work is available)
delete(unallocs, bit)
if len(unallocs) == 0 {
retrievers = nil
}
allocs++
fetcher <- bit
case fetcher := <-m.counters:
// New task count request arrives, return number of items
fetcher <- uint(len(requests[<-fetcher]))
case fetcher := <-m.retrievals:
// New fetcher waiting for tasks to retrieve, assign
task := <-fetcher
if want := len(task.Sections); want >= len(requests[task.Bit]) {
task.Sections = requests[task.Bit]
delete(requests, task.Bit)
} else {
task.Sections = append(task.Sections[:0], requests[task.Bit][:want]...)
requests[task.Bit] = append(requests[task.Bit][:0], requests[task.Bit][want:]...)
}
fetcher <- task
// If anything was left unallocated, try to assign to someone else
if len(requests[task.Bit]) > 0 {
assign(task.Bit)
}
case result := <-m.deliveries:
// New retrieval task response from fetcher, split out missing sections and
// deliver complete ones
var (
sections = make([]uint64, 0, len(result.Sections))
bitsets = make([][]byte, 0, len(result.Bitsets))
missing = make([]uint64, 0, len(result.Sections))
)
for i, bitset := range result.Bitsets {
if len(bitset) == 0 {
missing = append(missing, result.Sections[i])
continue
}
sections = append(sections, result.Sections[i])
bitsets = append(bitsets, bitset)
}
m.schedulers[result.Bit].deliver(sections, bitsets)
allocs--
// Reschedule missing sections and allocate bit if newly available
if len(missing) > 0 {
queue := requests[result.Bit]
for _, section := range missing {
index := sort.Search(len(queue), func(i int) bool { return queue[i] >= section })
queue = append(queue[:index], append([]uint64{section}, queue[index:]...)...)
}
requests[result.Bit] = queue
if len(queue) == len(missing) {
assign(result.Bit)
}
}
// If we're in the process of shutting down, terminate
if allocs == 0 && shutdown == nil {
return
}
}
}
}
// MatcherSession is returned by a started matcher to be used as a terminator
// for the actively running matching operation.
type MatcherSession struct {
matcher *Matcher
quit chan struct{} // Quit channel to request pipeline termination
kill chan struct{} // Term channel to signal non-graceful forced shutdown
pend sync.WaitGroup
}
// Close stops the matching process and waits for all subprocesses to terminate
// before returning. The timeout may be used for graceful shutdown, allowing the
// currently running retrievals to complete before this time.
func (s *MatcherSession) Close(timeout time.Duration) {
// Bail out if the matcher is not running
select {
case <-s.quit:
return
default:
}
// Signal termination and wait for all goroutines to tear down
close(s.quit)
time.AfterFunc(timeout, func() { close(s.kill) })
s.pend.Wait()
}
// AllocateRetrieval assigns a bloom bit index to a client process that can either
// immediately request and fetch the section contents assigned to this bit or wait
// a little while for more sections to be requested.
func (s *MatcherSession) AllocateRetrieval() (uint, bool) {
fetcher := make(chan uint)
select {
case <-s.quit:
return 0, false
case s.matcher.retrievers <- fetcher:
bit, ok := <-fetcher
return bit, ok
}
}
// PendingSections returns the number of pending section retrievals belonging to
// the given bloom bit index.
func (s *MatcherSession) PendingSections(bit uint) int {
fetcher := make(chan uint)
select {
case <-s.quit:
return 0
case s.matcher.counters <- fetcher:
fetcher <- bit
return int(<-fetcher)
}
}
// AllocateSections assigns all or part of an already allocated bit-task queue
// to the requesting process.
func (s *MatcherSession) AllocateSections(bit uint, count int) []uint64 {
fetcher := make(chan *Retrieval)
select {
case <-s.quit:
return nil
case s.matcher.retrievals <- fetcher:
task := &Retrieval{
Bit: bit,
Sections: make([]uint64, count),
}
fetcher <- task
return (<-fetcher).Sections
}
}
// DeliverSections delivers a batch of section bit-vectors for a specific bloom
// bit index to be injected into the processing pipeline.
func (s *MatcherSession) DeliverSections(bit uint, sections []uint64, bitsets [][]byte) {
select {
case <-s.kill:
return
case s.matcher.deliveries <- &Retrieval{Bit: bit, Sections: sections, Bitsets: bitsets}:
}
}
// Multiplex polls the matcher session for retrieval tasks and multiplexes them
// into the requested retrieval queue to be serviced together with other sessions.
//
// This method will block for the lifetime of the session. Even after termination
// of the session, any in-flight requests need to be responded to! Empty responses
// are fine though in that case.
func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) {
for {
// Allocate a new bloom bit index to retrieve data for, stopping when done
bit, ok := s.AllocateRetrieval()
if !ok {
return
}
// Bit allocated, throttle a bit if we're below our batch limit
if s.PendingSections(bit) < batch {
select {
case <-s.quit:
// Session terminating, we can't meaningfully service, abort
s.AllocateSections(bit, 0)
s.DeliverSections(bit, []uint64{}, [][]byte{})
return
case <-time.After(wait):
// Throttling up, fetch whatever's available
}
}
// Allocate as much as we can handle and request servicing
sections := s.AllocateSections(bit, batch)
request := make(chan *Retrieval)
select {
case <-s.quit:
// Session terminating, we can't meaningfully service, abort
s.DeliverSections(bit, sections, make([][]byte, len(sections)))
return
case mux <- request:
// Retrieval accepted, a response must arrive before we can abort
request <- &Retrieval{Bit: bit, Sections: sections}
result := <-request
s.DeliverSections(result.Bit, result.Sections, result.Bitsets)
}
}
}
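A minimal servicer sketch (hypothetical lookup; assuming 4096-block sections, so bit-vectors are 512 bytes): one goroutine answers the multiplexed tasks.
mux := make(chan chan *Retrieval)
go session.Multiplex(16, 100*time.Millisecond, mux)
go func() {
    for reqCh := range mux {
        task := <-reqCh // task.Bit plus the sections to fetch it for
        task.Bitsets = make([][]byte, len(task.Sections))
        for i := range task.Sections {
            // a real servicer loads the stored bit-vector for
            // (task.Bit, task.Sections[i]) from the database here
            task.Bitsets[i] = make([]byte, 512)
        }
        reqCh <- task // deliver the filled response back to the session
    }
}()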

View File

@ -0,0 +1,181 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"sync"
)
// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
section uint64 // Section index to retrieve a bit-vector from
bit uint // Bit index within the section to retrieve the vector of
}
// response represents the state of a requested bit-vector through a scheduler.
type response struct {
cached []byte // Cached bits to dedup multiple requests
done chan struct{} // Channel to allow waiting for completion
}
// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Beside scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
bit uint // Index of the bit in the bloom filter this scheduler is responsible for
responses map[uint64]*response // Currently pending retrieval requests or already cached responses
lock sync.Mutex // Lock protecting the responses from concurrent access
}
// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
return &scheduler{
bit: idx,
responses: make(map[uint64]*response),
}
}
// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
// Create a forwarder channel between requests and responses of the same size as
// the distribution channel (since that will block the pipeline anyway).
pend := make(chan uint64, cap(dist))
// Start the pipeline schedulers to forward between user -> distributor -> user
wg.Add(2)
go s.scheduleRequests(sections, dist, pend, quit, wg)
go s.scheduleDeliveries(pend, done, quit, wg)
}
// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure that no previously requested but never delivered response
// can cause a lockup.
func (s *scheduler) reset() {
s.lock.Lock()
defer s.lock.Unlock()
for section, res := range s.responses {
if res.cached == nil {
delete(s.responses, section)
}
}
}
// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
// Clean up the goroutine and pipeline when done
defer wg.Done()
defer close(pend)
// Keep reading and scheduling section requests
for {
select {
case <-quit:
return
case section, ok := <-reqs:
// New section retrieval requested
if !ok {
return
}
// Deduplicate retrieval requests
unique := false
s.lock.Lock()
if s.responses[section] == nil {
s.responses[section] = &response{
done: make(chan struct{}),
}
unique = true
}
s.lock.Unlock()
// Schedule the section for retrieval and notify the deliverer to expect this section
if unique {
select {
case <-quit:
return
case dist <- &request{bit: s.bit, section: section}:
}
}
select {
case <-quit:
return
case pend <- section:
}
}
}
}
// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
// Clean up the goroutine and pipeline when done
defer wg.Done()
defer close(done)
// Keep reading notifications and scheduling deliveries
for {
select {
case <-quit:
return
case idx, ok := <-pend:
// New section retrieval pending
if !ok {
return
}
// Wait until the request is honoured
s.lock.Lock()
res := s.responses[idx]
s.lock.Unlock()
select {
case <-quit:
return
case <-res.done:
}
// Deliver the result
select {
case <-quit:
return
case done <- res.cached:
}
}
}
}
// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
s.lock.Lock()
defer s.lock.Unlock()
for i, section := range sections {
if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
res.cached = data[i]
close(res.done)
}
}
}
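A minimal package-internal sketch (illustrative channel sizes and a 512-byte vector; newScheduler, request and deliver are unexported, so this only runs inside package bloombits) wiring one scheduler end to end:
s := newScheduler(0)
var wg sync.WaitGroup
sections := make(chan uint64)
dist := make(chan *request, 1)
done := make(chan []byte, 1)
quit := make(chan struct{})
s.run(sections, dist, done, quit, &wg)

sections <- 42 // ask for section 42 of bit 0
req := <-dist  // the deduplicated retrieval task pops out here
s.deliver([]uint64{req.section}, [][]byte{make([]byte, 512)})
vector := <-done // results come back in request order
_ = vector

close(sections) // shut the pipeline down
wg.Wait()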

View File

@ -0,0 +1,413 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"encoding/binary"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
)
// ChainIndexerBackend defines the methods needed to process chain segments in
// the background and write the segment results into the database. These can be
// used to create filter blooms or CHTs.
type ChainIndexerBackend interface {
// Reset initiates the processing of a new chain segment, potentially terminating
// any partially completed operations (in case of a reorg).
Reset(section uint64)
// Process crunches through the next header in the chain segment. The caller
// will ensure a sequential order of headers.
Process(header *types.Header)
// Commit finalizes the section metadata and stores it into the database.
Commit() error
}
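// Example (sketch, not part of the original file): a minimal backend that
// just counts the headers it is fed. It exists only to illustrate the
// Reset/Process/Commit life cycle of the interface above.
type countingBackend struct {
	section uint64 // section currently being processed
	headers uint64 // headers seen within the current section
}

// Reset drops any partial progress when a new section (or a reorg) starts.
func (b *countingBackend) Reset(section uint64) {
	b.section, b.headers = section, 0
}

// Process receives the section's headers strictly in order.
func (b *countingBackend) Process(header *types.Header) {
	b.headers++
}

// Commit would persist the section result; counting needs no persistence.
func (b *countingBackend) Commit() error {
	return nil
}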
// ChainIndexer does a post-processing job for equally sized sections of the
// canonical chain (like BloomBits and CHT structures). A ChainIndexer is
// connected to the blockchain through the event system by starting a
// ChainEventLoop in a goroutine.
//
// Further child ChainIndexers can be added which use the output of the parent
// section indexer. These child indexers receive new head notifications only
// after an entire section has been finished or in case of rollbacks that might
// affect already finished sections.
type ChainIndexer struct {
chainDb ethdb.Database // Chain database to index the data from
indexDb ethdb.Database // Prefixed table-view of the db to write index metadata into
backend ChainIndexerBackend // Background processor generating the index data content
children []*ChainIndexer // Child indexers to cascade chain updates to
active uint32 // Flag whether the event loop was started
update chan struct{} // Notification channel that headers should be processed
quit chan chan error // Quit channel to tear down running goroutines
sectionSize uint64 // Number of blocks in a single chain segment to process
confirmsReq uint64 // Number of confirmations before processing a completed segment
storedSections uint64 // Number of sections successfully indexed into the database
knownSections uint64 // Number of sections known to be complete (block wise)
cascadedHead uint64 // Block number of the last completed section cascaded to subindexers
throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources
log log.Logger
lock sync.RWMutex
}
// NewChainIndexer creates a new chain indexer to do background processing on
// chain segments of a given size after a certain number of confirmations have passed.
// The throttling parameter might be used to prevent database thrashing.
func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
c := &ChainIndexer{
chainDb: chainDb,
indexDb: indexDb,
backend: backend,
update: make(chan struct{}, 1),
quit: make(chan chan error),
sectionSize: section,
confirmsReq: confirm,
throttling: throttling,
log: log.New("type", kind),
}
// Initialize database dependent fields and start the updater
c.loadValidSections()
go c.updateLoop()
return c
}
// Start creates a goroutine to feed chain head events into the indexer for
// cascading background processing. Children do not need to be started, they
// are notified about new events by their parents.
func (c *ChainIndexer) Start(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) {
go c.eventLoop(currentHeader, chainEventer)
}
// Close tears down all goroutines belonging to the indexer and returns any error
// that might have occurred internally.
func (c *ChainIndexer) Close() error {
var errs []error
// Tear down the primary update loop
errc := make(chan error)
c.quit <- errc
if err := <-errc; err != nil {
errs = append(errs, err)
}
// If needed, tear down the secondary event loop
if atomic.LoadUint32(&c.active) != 0 {
c.quit <- errc
if err := <-errc; err != nil {
errs = append(errs, err)
}
}
// Close all children
for _, child := range c.children {
if err := child.Close(); err != nil {
errs = append(errs, err)
}
}
// Return any failures
switch {
case len(errs) == 0:
return nil
case len(errs) == 1:
return errs[0]
default:
return fmt.Errorf("%v", errs)
}
}
// eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing
// queue.
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) {
// Mark the chain indexer as active, requiring an additional teardown
atomic.StoreUint32(&c.active, 1)
events := make(chan ChainEvent, 10)
sub := chainEventer(events)
defer sub.Unsubscribe()
// Fire the initial new head event to start any outstanding processing
c.newHead(currentHeader.Number.Uint64(), false)
var (
prevHeader = currentHeader
prevHash = currentHeader.Hash()
)
for {
select {
case errc := <-c.quit:
// Chain indexer terminating, report no failure and abort
errc <- nil
return
case ev, ok := <-events:
// Received a new event, ensure it's not nil (closing) and update
if !ok {
errc := <-c.quit
errc <- nil
return
}
header := ev.Block.Header()
if header.ParentHash != prevHash {
c.newHead(FindCommonAncestor(c.chainDb, prevHeader, header).Number.Uint64(), true)
}
c.newHead(header.Number.Uint64(), false)
prevHeader, prevHash = header, header.Hash()
}
}
}
// newHead notifies the indexer about new chain heads and/or reorgs.
func (c *ChainIndexer) newHead(head uint64, reorg bool) {
c.lock.Lock()
defer c.lock.Unlock()
// If a reorg happened, invalidate all sections until that point
if reorg {
// Revert the known section number to the reorg point
changed := head / c.sectionSize
if changed < c.knownSections {
c.knownSections = changed
}
// Revert the stored sections from the database to the reorg point
if changed < c.storedSections {
c.setValidSections(changed)
}
// Update the new head number to the finalized section end and notify children
head = changed * c.sectionSize
if head < c.cascadedHead {
c.cascadedHead = head
for _, child := range c.children {
child.newHead(c.cascadedHead, true)
}
}
return
}
// No reorg, calculate the number of newly known sections and update if high enough
var sections uint64
if head >= c.confirmsReq {
sections = (head + 1 - c.confirmsReq) / c.sectionSize
if sections > c.knownSections {
c.knownSections = sections
select {
case c.update <- struct{}{}:
default:
}
}
}
}
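// Worked example (not part of the original file): with sectionSize = 4096 and
// confirmsReq = 256, a head at block 8447 yields (8447+1-256)/4096 = 2 fully
// confirmed sections, i.e. blocks [0, 8191]; knownSections is bumped to 2 and
// the update channel is poked without blocking.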
// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
func (c *ChainIndexer) updateLoop() {
var (
updating bool
updated time.Time
)
for {
select {
case errc := <-c.quit:
// Chain indexer terminating, report no failure and abort
errc <- nil
return
case <-c.update:
// Section headers completed (or rolled back), update the index
c.lock.Lock()
if c.knownSections > c.storedSections {
// Periodically print an upgrade log message to the user
if time.Since(updated) > 8*time.Second {
if c.knownSections > c.storedSections+1 {
updating = true
c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
}
updated = time.Now()
}
// Cache the current section count and head to allow unlocking the mutex
section := c.storedSections
var oldHead common.Hash
if section > 0 {
oldHead = c.sectionHead(section - 1)
}
// Process the newly defined section in the background
c.lock.Unlock()
newHead, err := c.processSection(section, oldHead)
if err != nil {
c.log.Error("Section processing failed", "error", err)
}
c.lock.Lock()
// If processing succeeded and no reorgs occurred, mark the section completed
if err == nil && oldHead == c.sectionHead(section-1) {
c.setSectionHead(section, newHead)
c.setValidSections(section + 1)
if c.storedSections == c.knownSections && updating {
updating = false
c.log.Info("Finished upgrading chain index")
}
c.cascadedHead = c.storedSections*c.sectionSize - 1
for _, child := range c.children {
c.log.Trace("Cascading chain index update", "head", c.cascadedHead)
child.newHead(c.cascadedHead, false)
}
} else {
// If processing failed, don't retry until further notification
c.log.Debug("Chain index processing failed", "section", section, "err", err)
c.knownSections = c.storedSections
}
}
// If there are still further sections to process, reschedule
if c.knownSections > c.storedSections {
time.AfterFunc(c.throttling, func() {
select {
case c.update <- struct{}{}:
default:
}
})
}
c.lock.Unlock()
}
}
}
// processSection processes an entire section by calling backend functions while
// ensuring the continuity of the passed headers. Since the chain mutex is not
// held while processing, the continuity can be broken by a long reorg, in which
// case the function returns with an error.
func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (common.Hash, error) {
c.log.Trace("Processing new chain section", "section", section)
// Reset and partial processing
c.backend.Reset(section)
for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
hash := GetCanonicalHash(c.chainDb, number)
if hash == (common.Hash{}) {
return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
}
header := GetHeader(c.chainDb, hash, number)
if header == nil {
return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
} else if header.ParentHash != lastHead {
return common.Hash{}, fmt.Errorf("chain reorged during section processing")
}
c.backend.Process(header)
lastHead = header.Hash()
}
if err := c.backend.Commit(); err != nil {
c.log.Error("Section commit failed", "error", err)
return common.Hash{}, err
}
return lastHead, nil
}
// Sections returns the number of processed sections maintained by the indexer
// and also the information about the last header indexed for potential canonical
// verifications.
func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
c.lock.Lock()
defer c.lock.Unlock()
return c.storedSections, c.storedSections*c.sectionSize - 1, c.sectionHead(c.storedSections - 1)
}
// AddChildIndexer adds a child ChainIndexer that can use the output of this one
func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
c.lock.Lock()
defer c.lock.Unlock()
c.children = append(c.children, indexer)
// Cascade any pending updates to new children too
if c.storedSections > 0 {
indexer.newHead(c.storedSections*c.sectionSize-1, false)
}
}
// loadValidSections reads the number of valid sections from the index database
// and caches it into the local state.
func (c *ChainIndexer) loadValidSections() {
data, _ := c.indexDb.Get([]byte("count"))
if len(data) == 8 {
c.storedSections = binary.BigEndian.Uint64(data[:])
}
}
// setValidSections writes the number of valid sections to the index database
func (c *ChainIndexer) setValidSections(sections uint64) {
// Set the current number of valid sections in the database
var data [8]byte
binary.BigEndian.PutUint64(data[:], sections)
c.indexDb.Put([]byte("count"), data[:])
// Remove any reorged sections, caching the valid ones in the meantime
for c.storedSections > sections {
c.storedSections--
c.removeSectionHead(c.storedSections)
}
c.storedSections = sections // needed if new > old
}
// sectionHead retrieves the last block hash of a processed section from the
// index database.
func (c *ChainIndexer) sectionHead(section uint64) common.Hash {
var data [8]byte
binary.BigEndian.PutUint64(data[:], section)
hash, _ := c.indexDb.Get(append([]byte("shead"), data[:]...))
if len(hash) == len(common.Hash{}) {
return common.BytesToHash(hash)
}
return common.Hash{}
}
// setSectionHead writes the last block hash of a processed section to the index
// database.
func (c *ChainIndexer) setSectionHead(section uint64, hash common.Hash) {
var data [8]byte
binary.BigEndian.PutUint64(data[:], section)
c.indexDb.Put(append([]byte("shead"), data[:]...), hash.Bytes())
}
// removeSectionHead removes the reference to a processed section from the index
// database.
func (c *ChainIndexer) removeSectionHead(section uint64) {
var data [8]byte
binary.BigEndian.PutUint64(data[:], section)
c.indexDb.Delete(append([]byte("shead"), data[:]...))
}
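// Example (sketch, not part of the original file): wiring a parent/child
// indexer pair. The section size, confirmation count and throttling values
// are made up; a real setup would also give each indexer its own prefixed
// table view of the database so their metadata keys cannot collide.
func exampleChainIndexer(chainDb, indexDb ethdb.Database, head *types.Header,
	eventer func(ch chan<- ChainEvent) event.Subscription) *ChainIndexer {

	parent := NewChainIndexer(chainDb, indexDb, &countingBackend{}, 4096, 256, 100*time.Millisecond, "example")
	child := NewChainIndexer(chainDb, indexDb, &countingBackend{}, 4096, 256, 100*time.Millisecond, "example-child")

	parent.AddChildIndexer(child) // the child only hears about fully completed sections
	parent.Start(head, eventer)   // only the outermost indexer runs an event loop
	return parent
}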

View File

@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
)
@ -180,7 +179,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, db ethdb.Dat
if gen != nil {
gen(i, b)
}
ethash.AccumulateRewards(statedb, h, b.uncles)
ethash.AccumulateRewards(config, statedb, h, b.uncles)
root, err := statedb.CommitTo(db, config.IsEIP158(h.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
@ -218,6 +217,7 @@ func makeHeader(config *params.ChainConfig, parent *types.Block, state *state.St
Number: parent.Number(),
Time: new(big.Int).Sub(time, big.NewInt(10)),
Difficulty: parent.Difficulty(),
UncleHash: parent.UncleHash(),
}),
GasLimit: CalcGasLimit(parent),
GasUsed: new(big.Int),
@ -235,7 +235,7 @@ func newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {
db, _ := ethdb.NewMemDatabase()
genesis := gspec.MustCommit(db)
blockchain, _ := NewBlockChain(db, params.AllProtocolChanges, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blockchain, _ := NewBlockChain(db, params.AllProtocolChanges, ethash.NewFaker(), vm.Config{})
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil

View File

@ -1,75 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"bytes"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
// ValidateDAOHeaderExtraData validates the extra-data field of a block header to
// ensure it conforms to DAO hard-fork rules.
//
// DAO hard-fork extension to the header validity:
// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range
// with the fork specific extra-data set
// b) if the node is pro-fork, require blocks in the specific range to have the
// unique extra-data set.
func ValidateDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error {
// Short circuit validation if the node doesn't care about the DAO fork
if config.DAOForkBlock == nil {
return nil
}
// Make sure the block is within the fork's modified extra-data range
limit := new(big.Int).Add(config.DAOForkBlock, params.DAOForkExtraRange)
if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 {
return nil
}
// Depending whether we support or oppose the fork, validate the extra-data contents
if config.DAOForkSupport {
if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
return fmt.Errorf("DAO pro-fork bad block extra-data: 0x%x", header.Extra)
}
} else {
if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
return fmt.Errorf("DAO no-fork bad block extra-data: 0x%x", header.Extra)
}
}
// All ok, header has the same extra-data we expect
return nil
}
// ApplyDAOHardFork modifies the state database according to the DAO hard-fork
// rules, transferring all balances of a set of DAO accounts to a single refund
// contract.
func ApplyDAOHardFork(statedb *state.StateDB) {
// Retrieve the contract to refund balances into
if !statedb.Exist(params.DAORefundContract) {
statedb.CreateAccount(params.DAORefundContract)
}
// Move every DAO account and extra-balance account funds into the refund contract
for _, addr := range params.DAODrainList() {
statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr))
statedb.SetBalance(addr, new(big.Int))
}
}

View File

@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@ -34,44 +33,55 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
// DatabaseReader wraps the Get method of a backing data store.
type DatabaseReader interface {
Get(key []byte) (value []byte, err error)
}
// DatabaseDeleter wraps the Delete method of a backing data store.
type DatabaseDeleter interface {
Delete(key []byte) error
}
var (
headHeaderKey = []byte("LastHeader")
headBlockKey = []byte("LastBlock")
headFastKey = []byte("LastFast")
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
tdSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td
numSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + numSuffix -> hash
blockHashPrefix = []byte("H") // blockHashPrefix + hash -> num (uint64 big endian)
bodyPrefix = []byte("b") // bodyPrefix + num (uint64 big endian) + hash -> block body
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
preimagePrefix = "secure-key-" // preimagePrefix + hash -> preimage
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
tdSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td
numSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + numSuffix -> hash
blockHashPrefix = []byte("H") // blockHashPrefix + hash -> num (uint64 big endian)
bodyPrefix = []byte("b") // bodyPrefix + num (uint64 big endian) + hash -> block body
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
lookupPrefix = []byte("l") // lookupPrefix + hash -> transaction/receipt lookup metadata
bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
txMetaSuffix = []byte{0x01}
receiptsPrefix = []byte("receipts-")
preimagePrefix = "secure-key-" // preimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
mipmapPre = []byte("mipmap-log-bloom-")
MIPMapLevels = []uint64{1000000, 500000, 100000, 50000, 1000}
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
configPrefix = []byte("ethereum-config-") // config prefix for the db
// used by old (non-sequential keys) db, now only used for conversion
oldBlockPrefix = []byte("block-")
oldHeaderSuffix = []byte("-header")
oldTdSuffix = []byte("-td") // oldBlockPrefix + hash + oldTdSuffix -> td
oldBodySuffix = []byte("-body")
oldBlockNumPrefix = []byte("block-num-")
oldBlockReceiptsPrefix = []byte("receipts-block-")
oldBlockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually]
// used by old db, now only used for conversion
oldReceiptsPrefix = []byte("receipts-")
oldTxMetaSuffix = []byte{0x01}
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
mipmapBloomMu sync.Mutex // protect against race condition when updating mipmap blooms
preimageCounter = metrics.NewCounter("db/preimage/total")
preimageHitCounter = metrics.NewCounter("db/preimage/hits")
)
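// Worked example (not part of the original file): for block number 1 with
// header hash `hash` and transaction hash `txHash`, the schema above yields:
//
//	append(append(headerPrefix, encodeBlockNumber(1)...), hash.Bytes()...)                      // "h" + num + hash -> header RLP
//	append(append(append(headerPrefix, encodeBlockNumber(1)...), hash.Bytes()...), tdSuffix...) // ... + "t" -> td RLP
//	append(append(headerPrefix, encodeBlockNumber(1)...), numSuffix...)                         // "h" + num + "n"  -> canonical hash
//	append(lookupPrefix, txHash.Bytes()...)                                                     // "l" + tx hash    -> lookup entry RLP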
// txLookupEntry is positional metadata to help look up the data content of
// a transaction or receipt given only its hash.
type txLookupEntry struct {
BlockHash common.Hash
BlockIndex uint64
Index uint64
}
// encodeBlockNumber encodes a block number as big endian uint64
func encodeBlockNumber(number uint64) []byte {
enc := make([]byte, 8)
@ -80,13 +90,10 @@ func encodeBlockNumber(number uint64) []byte {
}
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
func GetCanonicalHash(db DatabaseReader, number uint64) common.Hash {
data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
if len(data) == 0 {
data, _ = db.Get(append(oldBlockNumPrefix, big.NewInt(int64(number)).Bytes()...))
if len(data) == 0 {
return common.Hash{}
}
return common.Hash{}
}
return common.BytesToHash(data)
}
@ -97,18 +104,10 @@ const missingNumber = uint64(0xffffffffffffffff)
// GetBlockNumber returns the block number assigned to a block hash
// if the corresponding header is present in the database
func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
func GetBlockNumber(db DatabaseReader, hash common.Hash) uint64 {
data, _ := db.Get(append(blockHashPrefix, hash.Bytes()...))
if len(data) != 8 {
data, _ := db.Get(append(append(oldBlockPrefix, hash.Bytes()...), oldHeaderSuffix...))
if len(data) == 0 {
return missingNumber
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
log.Crit("Failed to decode block header", "err", err)
}
return header.Number.Uint64()
return missingNumber
}
return binary.BigEndian.Uint64(data)
}
@ -118,7 +117,7 @@ func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
// last block hash is only updated upon a full block import, the last header
// hash is updated already at header import, allowing head tracking for the
// light synchronization mechanism.
func GetHeadHeaderHash(db ethdb.Database) common.Hash {
func GetHeadHeaderHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headHeaderKey)
if len(data) == 0 {
return common.Hash{}
@ -127,7 +126,7 @@ func GetHeadHeaderHash(db ethdb.Database) common.Hash {
}
// GetHeadBlockHash retrieves the hash of the current canonical head block.
func GetHeadBlockHash(db ethdb.Database) common.Hash {
func GetHeadBlockHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headBlockKey)
if len(data) == 0 {
return common.Hash{}
@ -139,7 +138,7 @@ func GetHeadBlockHash(db ethdb.Database) common.Hash {
// fast synchronization. The difference between this and GetHeadBlockHash is that
// whereas the last block hash is only updated upon a full block import, the last
// fast hash is updated when importing pre-processed blocks.
func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
func GetHeadFastBlockHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headFastKey)
if len(data) == 0 {
return common.Hash{}
@ -149,17 +148,14 @@ func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found.
func GetHeaderRLP(db ethdb.Database, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
if len(data) == 0 {
data, _ = db.Get(append(append(oldBlockPrefix, hash.Bytes()...), oldHeaderSuffix...))
}
func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(headerKey(hash, number))
return data
}
// GetHeader retrieves the block header corresponding to the hash, nil if none
// found.
func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header {
func GetHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Header {
data := GetHeaderRLP(db, hash, number)
if len(data) == 0 {
return nil
@ -173,17 +169,22 @@ func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header
}
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(db ethdb.Database, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
if len(data) == 0 {
data, _ = db.Get(append(append(oldBlockPrefix, hash.Bytes()...), oldBodySuffix...))
}
func GetBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(blockBodyKey(hash, number))
return data
}
func headerKey(hash common.Hash, number uint64) []byte {
return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
}
func blockBodyKey(hash common.Hash, number uint64) []byte {
return append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
}
// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash, nil if none found.
func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
func GetBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
data := GetBodyRLP(db, hash, number)
if len(data) == 0 {
return nil
@ -198,13 +199,10 @@ func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
// none found.
func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
func GetTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), tdSuffix...))
if len(data) == 0 {
data, _ = db.Get(append(append(oldBlockPrefix, hash.Bytes()...), oldTdSuffix...))
if len(data) == 0 {
return nil
}
return nil
}
td := new(big.Int)
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
@ -220,7 +218,7 @@ func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
func GetBlock(db ethdb.Database, hash common.Hash, number uint64) *types.Block {
func GetBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block {
// Retrieve the block header and body contents
header := GetHeader(db, hash, number)
if header == nil {
@ -236,13 +234,10 @@ func GetBlock(db ethdb.Database, hash common.Hash, number uint64) *types.Block {
// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.Receipts {
func GetBlockReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...))
if len(data) == 0 {
data, _ = db.Get(append(oldBlockReceiptsPrefix, hash.Bytes()...))
if len(data) == 0 {
return nil
}
return nil
}
storageReceipts := []*types.ReceiptForStorage{}
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
@ -256,10 +251,38 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.
return receipts
}
// GetTxLookupEntry retrieves the positional metadata associated with a transaction
// hash to allow retrieving the transaction or receipt by hash.
func GetTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) {
// Load the positional metadata from disk and bail if it fails
data, _ := db.Get(append(lookupPrefix, hash.Bytes()...))
if len(data) == 0 {
return common.Hash{}, 0, 0
}
// Parse and return the contents of the lookup entry
var entry txLookupEntry
if err := rlp.DecodeBytes(data, &entry); err != nil {
log.Error("Invalid lookup entry RLP", "hash", hash, "err", err)
return common.Hash{}, 0, 0
}
return entry.BlockHash, entry.BlockIndex, entry.Index
}
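// Example (sketch, not part of the original file): storing and resolving one
// lookup entry by hand, the way WriteTxLookupEntries further below does for
// every transaction in a block. The hashes are made up.
func exampleTxLookup() {
	db, _ := ethdb.NewMemDatabase()

	txHash := common.HexToHash("0x02")
	entry := txLookupEntry{
		BlockHash:  common.HexToHash("0x01"),
		BlockIndex: 1,
		Index:      0,
	}
	data, _ := rlp.EncodeToBytes(entry)
	db.Put(append(lookupPrefix, txHash.Bytes()...), data)

	blockHash, blockNumber, txIndex := GetTxLookupEntry(db, txHash)
	fmt.Println(blockHash.Hex(), blockNumber, txIndex) // block 0x…01, number 1, position 0
}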
// GetTransaction retrieves a specific transaction from the database, along with
// its added positional metadata.
func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
// Retrieve the transaction itself from the database
func GetTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
// Retrieve the lookup metadata and resolve the transaction from the body
blockHash, blockNumber, txIndex := GetTxLookupEntry(db, hash)
if blockHash != (common.Hash{}) {
body := GetBody(db, blockHash, blockNumber)
if body == nil || len(body.Transactions) <= int(txIndex) {
log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex)
return nil, common.Hash{}, 0, 0
}
return body.Transactions[txIndex], blockHash, blockNumber, txIndex
}
// Old transaction representation, load the transaction and its metadata separately
data, _ := db.Get(hash.Bytes())
if len(data) == 0 {
return nil, common.Hash{}, 0, 0
@ -269,37 +292,58 @@ func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, co
return nil, common.Hash{}, 0, 0
}
// Retrieve the blockchain positional metadata
data, _ = db.Get(append(hash.Bytes(), txMetaSuffix...))
data, _ = db.Get(append(hash.Bytes(), oldTxMetaSuffix...))
if len(data) == 0 {
return nil, common.Hash{}, 0, 0
}
var meta struct {
BlockHash common.Hash
BlockIndex uint64
Index uint64
}
if err := rlp.DecodeBytes(data, &meta); err != nil {
var entry txLookupEntry
if err := rlp.DecodeBytes(data, &entry); err != nil {
return nil, common.Hash{}, 0, 0
}
return &tx, meta.BlockHash, meta.BlockIndex, meta.Index
return &tx, entry.BlockHash, entry.BlockIndex, entry.Index
}
// GetReceipt returns a receipt by hash
func GetReceipt(db ethdb.Database, hash common.Hash) *types.Receipt {
data, _ := db.Get(append(receiptsPrefix, hash[:]...))
// GetReceipt retrieves a specific transaction receipt from the database, along with
// its added positional metadata.
func GetReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) {
// Retrieve the lookup metadata and resolve the receipt from the receipts
blockHash, blockNumber, receiptIndex := GetTxLookupEntry(db, hash)
if blockHash != (common.Hash{}) {
receipts := GetBlockReceipts(db, blockHash, blockNumber)
if len(receipts) <= int(receiptIndex) {
log.Error("Receipt refereced missing", "number", blockNumber, "hash", blockHash, "index", receiptIndex)
return nil, common.Hash{}, 0, 0
}
return receipts[receiptIndex], blockHash, blockNumber, receiptIndex
}
// Old receipt representation, load the receipt and set an unknown metadata
data, _ := db.Get(append(oldReceiptsPrefix, hash[:]...))
if len(data) == 0 {
return nil
return nil, common.Hash{}, 0, 0
}
var receipt types.ReceiptForStorage
err := rlp.DecodeBytes(data, &receipt)
if err != nil {
log.Error("Invalid receipt RLP", "hash", hash, "err", err)
}
return (*types.Receipt)(&receipt)
return (*types.Receipt)(&receipt), common.Hash{}, 0, 0
}
// GetBloomBits retrieves the compressed bloom bit vector belonging to the given
// section and bit index from the database.
func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) []byte {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
binary.BigEndian.PutUint16(key[1:], uint16(bit))
binary.BigEndian.PutUint64(key[3:], section)
bits, _ := db.Get(key)
return bits
}
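// Example (sketch, not part of the original file): round-tripping one bloom
// bit vector through the 1+2+8+32 byte key layout used above.
func exampleBloomBits() []byte {
	db, _ := ethdb.NewMemDatabase()
	head := common.HexToHash("0x01")

	WriteBloomBits(db, 7, 3, head, []byte{0xff}) // bit 7 of section 3, headed by `head`
	return GetBloomBits(db, 7, 3, head)          // -> []byte{0xff}
}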
// WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
func WriteCanonicalHash(db ethdb.Putter, hash common.Hash, number uint64) error {
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
if err := db.Put(key, hash.Bytes()); err != nil {
log.Crit("Failed to store number to hash mapping", "err", err)
@ -308,7 +352,7 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro
}
// WriteHeadHeaderHash stores the head header's hash.
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
func WriteHeadHeaderHash(db ethdb.Putter, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last header's hash", "err", err)
}
@ -316,7 +360,7 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
}
// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
func WriteHeadBlockHash(db ethdb.Putter, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last block's hash", "err", err)
}
@ -324,7 +368,7 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
}
// WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
func WriteHeadFastBlockHash(db ethdb.Putter, hash common.Hash) error {
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last fast block's hash", "err", err)
}
@ -332,7 +376,7 @@ func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
}
// WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Database, header *types.Header) error {
func WriteHeader(db ethdb.Putter, header *types.Header) error {
data, err := rlp.EncodeToBytes(header)
if err != nil {
return err
@ -352,7 +396,7 @@ func WriteHeader(db ethdb.Database, header *types.Header) error {
}
// WriteBody serializes the body of a block into the database.
func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.Body) error {
func WriteBody(db ethdb.Putter, hash common.Hash, number uint64, body *types.Body) error {
data, err := rlp.EncodeToBytes(body)
if err != nil {
return err
@ -361,7 +405,7 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B
}
// WriteBodyRLP writes a serialized body of a block into the database.
func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
func WriteBodyRLP(db ethdb.Putter, hash common.Hash, number uint64, rlp rlp.RawValue) error {
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, rlp); err != nil {
log.Crit("Failed to store block body", "err", err)
@ -370,7 +414,7 @@ func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.Ra
}
// WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) error {
func WriteTd(db ethdb.Putter, hash common.Hash, number uint64, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
if err != nil {
return err
@ -383,7 +427,7 @@ func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) er
}
// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.Database, block *types.Block) error {
func WriteBlock(db ethdb.Putter, block *types.Block) error {
// Store the body first to retain database consistency
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err
@ -398,7 +442,7 @@ func WriteBlock(db ethdb.Database, block *types.Block) error {
// WriteBlockReceipts stores all the transaction receipts belonging to a block
// as a single receipt slice. This is used during chain reorganisations for
// rescheduling dropped transactions.
func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, receipts types.Receipts) error {
func WriteBlockReceipts(db ethdb.Putter, hash common.Hash, number uint64, receipts types.Receipts) error {
// Convert the receipts into their storage form and serialize them
storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts {
@ -416,103 +460,63 @@ func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, rece
return nil
}
// WriteTransactions stores the transactions associated with a specific block
// into the given database. Besides writing the transaction, the function also
// stores a metadata entry along with the transaction, detailing its position
// within the blockchain.
func WriteTransactions(db ethdb.Database, block *types.Block) error {
batch := db.NewBatch()
// Iterate over each transaction and encode it with its metadata
// WriteTxLookupEntries stores positional metadata for every transaction from
// a block, enabling hash based transaction and receipt lookups.
func WriteTxLookupEntries(db ethdb.Putter, block *types.Block) error {
// Iterate over each transaction and encode its metadata
for i, tx := range block.Transactions() {
// Encode and queue up the transaction for storage
data, err := rlp.EncodeToBytes(tx)
if err != nil {
return err
}
if err = batch.Put(tx.Hash().Bytes(), data); err != nil {
return err
}
// Encode and queue up the transaction metadata for storage
meta := struct {
BlockHash common.Hash
BlockIndex uint64
Index uint64
}{
entry := txLookupEntry{
BlockHash: block.Hash(),
BlockIndex: block.NumberU64(),
Index: uint64(i),
}
data, err = rlp.EncodeToBytes(meta)
data, err := rlp.EncodeToBytes(entry)
if err != nil {
return err
}
if err := batch.Put(append(tx.Hash().Bytes(), txMetaSuffix...), data); err != nil {
if err := db.Put(append(lookupPrefix, tx.Hash().Bytes()...), data); err != nil {
return err
}
}
// Write the scheduled data into the database
if err := batch.Write(); err != nil {
log.Crit("Failed to store transactions", "err", err)
}
return nil
}
// WriteReceipt stores a single transaction receipt into the database.
func WriteReceipt(db ethdb.Database, receipt *types.Receipt) error {
storageReceipt := (*types.ReceiptForStorage)(receipt)
data, err := rlp.EncodeToBytes(storageReceipt)
if err != nil {
return err
}
return db.Put(append(receiptsPrefix, receipt.TxHash.Bytes()...), data)
}
// WriteBloomBits writes the compressed bloom bits vector belonging to the given
// section and bit index.
func WriteBloomBits(db ethdb.Putter, bit uint, section uint64, head common.Hash, bits []byte) {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
// WriteReceipts stores a batch of transaction receipts into the database.
func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
batch := db.NewBatch()
binary.BigEndian.PutUint16(key[1:], uint16(bit))
binary.BigEndian.PutUint64(key[3:], section)
// Iterate over all the receipts and queue them for database injection
for _, receipt := range receipts {
storageReceipt := (*types.ReceiptForStorage)(receipt)
data, err := rlp.EncodeToBytes(storageReceipt)
if err != nil {
return err
}
if err := batch.Put(append(receiptsPrefix, receipt.TxHash.Bytes()...), data); err != nil {
return err
}
if err := db.Put(key, bits); err != nil {
log.Crit("Failed to store bloom bits", "err", err)
}
// Write the scheduled data into the database
if err := batch.Write(); err != nil {
log.Crit("Failed to store receipts", "err", err)
}
return nil
}
// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.Database, number uint64) {
func DeleteCanonicalHash(db DatabaseDeleter, number uint64) {
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
}
// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(blockHashPrefix, hash.Bytes()...))
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
}
// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
}
// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...))
}
// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) {
DeleteBlockReceipts(db, hash, number)
DeleteHeader(db, hash, number)
DeleteBody(db, hash, number)
@ -520,61 +524,13 @@ func DeleteBlock(db ethdb.Database, hash common.Hash, number uint64) {
}
// DeleteBlockReceipts removes all receipt data associated with a block hash.
func DeleteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) {
func DeleteBlockReceipts(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
}
// DeleteTransaction removes all transaction data associated with a hash.
func DeleteTransaction(db ethdb.Database, hash common.Hash) {
db.Delete(hash.Bytes())
db.Delete(append(hash.Bytes(), txMetaSuffix...))
}
// DeleteReceipt removes all receipt data associated with a transaction hash.
func DeleteReceipt(db ethdb.Database, hash common.Hash) {
db.Delete(append(receiptsPrefix, hash.Bytes()...))
}
// returns a formatted MIP mapped key by adding prefix, canonical number and level
//
// ex. fn(98, 1000) = (prefix || 1000 || 0)
func mipmapKey(num, level uint64) []byte {
lkey := make([]byte, 8)
binary.BigEndian.PutUint64(lkey, level)
key := new(big.Int).SetUint64(num / level * level)
return append(mipmapPre, append(lkey, key.Bytes()...)...)
}
// WriteMipmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
mipmapBloomMu.Lock()
defer mipmapBloomMu.Unlock()
batch := db.NewBatch()
for _, level := range MIPMapLevels {
key := mipmapKey(number, level)
bloomDat, _ := db.Get(key)
bloom := types.BytesToBloom(bloomDat)
for _, receipt := range receipts {
for _, log := range receipt.Logs {
bloom.Add(log.Address.Big())
}
}
batch.Put(key, bloom.Bytes())
}
if err := batch.Write(); err != nil {
return fmt.Errorf("mipmap write fail for: %d: %v", number, err)
}
return nil
}
// GetMipmapBloom returns a bloom filter using the number and level as input
// parameters. For available levels see MIPMapLevels.
func GetMipmapBloom(db ethdb.Database, number, level uint64) types.Bloom {
bloomDat, _ := db.Get(mipmapKey(number, level))
return types.BytesToBloom(bloomDat)
// DeleteTxLookupEntry removes the transaction lookup entry associated with a hash.
func DeleteTxLookupEntry(db DatabaseDeleter, hash common.Hash) {
db.Delete(append(lookupPrefix, hash.Bytes()...))
}
// PreimageTable returns a Database instance with the key prefix for preimage entries.
@ -605,7 +561,7 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
}
// GetBlockChainVersion reads the version number from db.
func GetBlockChainVersion(db ethdb.Database) int {
func GetBlockChainVersion(db DatabaseReader) int {
var vsn uint
enc, _ := db.Get([]byte("BlockchainVersion"))
rlp.DecodeBytes(enc, &vsn)
@ -613,13 +569,13 @@ func GetBlockChainVersion(db ethdb.Database) int {
}
// WriteBlockChainVersion writes vsn as the version number to db.
func WriteBlockChainVersion(db ethdb.Database, vsn int) {
func WriteBlockChainVersion(db ethdb.Putter, vsn int) {
enc, _ := rlp.EncodeToBytes(uint(vsn))
db.Put([]byte("BlockchainVersion"), enc)
}
// WriteChainConfig writes the chain config settings to the database.
func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConfig) error {
func WriteChainConfig(db ethdb.Putter, hash common.Hash, cfg *params.ChainConfig) error {
// short circuit and ignore if nil config. GetChainConfig
// will return a default.
if cfg == nil {
@ -635,7 +591,7 @@ func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConf
}
// GetChainConfig will fetch the network settings based on the given hash.
func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, error) {
func GetChainConfig(db DatabaseReader, hash common.Hash) (*params.ChainConfig, error) {
jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
if len(jsonChainConfig) == 0 {
return nil, ErrChainConfigNotFound
@ -650,7 +606,7 @@ func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, e
}
// FindCommonAncestor returns the last common ancestor of two block headers
func FindCommonAncestor(db ethdb.Database, a, b *types.Header) *types.Header {
func FindCommonAncestor(db DatabaseReader, a, b *types.Header) *types.Header {
for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1)
if a == nil {

View File

@ -28,4 +28,8 @@ var (
// ErrBlacklistedHash is returned if a block to import is on the blacklist.
ErrBlacklistedHash = errors.New("blacklisted hash")
// ErrNonceTooHigh is returned if the nonce of a transaction is higher than the
// next one expected based on the local chain.
ErrNonceTooHigh = errors.New("nonce too high")
)

View File

@ -13,8 +13,6 @@ import (
"github.com/ethereum/go-ethereum/params"
)
var _ = (*genesisSpecMarshaling)(nil)
func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
Config *params.ChainConfig `json:"config"`
@ -26,7 +24,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
Number uint64 `json:"number"`
Number math.HexOrDecimal64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
}
@ -45,7 +43,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.Alloc[common.UnprefixedAddress(k)] = v
}
}
enc.Number = g.Number
enc.Number = math.HexOrDecimal64(g.Number)
enc.GasUsed = math.HexOrDecimal64(g.GasUsed)
enc.ParentHash = g.ParentHash
return json.Marshal(&enc)
@ -62,7 +60,7 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
Mixhash *common.Hash `json:"mixHash"`
Coinbase *common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
Number *uint64 `json:"number"`
Number *math.HexOrDecimal64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
}
@ -104,7 +102,7 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
g.Alloc[common.Address(k)] = v
}
if dec.Number != nil {
g.Number = *dec.Number
g.Number = uint64(*dec.Number)
}
if dec.GasUsed != nil {
g.GasUsed = uint64(*dec.GasUsed)

View File

@ -92,6 +92,7 @@ type genesisSpecMarshaling struct {
ExtraData hexutil.Bytes
GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64
Number math.HexOrDecimal64
Difficulty *math.HexOrDecimal256
Alloc map[common.UnprefixedAddress]GenesisAccount
}

View File

@ -267,7 +267,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
return i, errors.New("aborted")
}
// If the header's already known, skip it, otherwise store
if hc.GetHeader(header.Hash(), header.Number.Uint64()) != nil {
if hc.HasHeader(header.Hash(), header.Number.Uint64()) {
stats.ignored++
continue
}
@ -361,10 +361,13 @@ func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
return hc.GetHeader(hash, hc.GetBlockNumber(hash))
}
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (hc *HeaderChain) HasHeader(hash common.Hash) bool {
return hc.GetHeaderByHash(hash) != nil
// HasHeader checks if a block header is present in the database or not.
func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
return true
}
ok, _ := hc.chainDb.Has(headerKey(hash, number))
return ok
}
// GetHeaderByNumber retrieves a block header from the database by number,

View File

@ -91,11 +91,6 @@ func (ch suicideChange) undo(s *StateDB) {
if obj != nil {
obj.suicided = ch.prev
obj.setBalance(ch.prevbalance)
// if the object wasn't suicided before, remove
// it from the list of destructed objects as well.
if !obj.suicided {
delete(s.stateObjectsDestructed, *ch.account)
}
}
}

View File

@ -46,9 +46,8 @@ type StateDB struct {
trie Trie
// This map holds 'live' objects, which will get modified while processing a state transition.
stateObjects map[common.Address]*stateObject
stateObjectsDirty map[common.Address]struct{}
stateObjectsDestructed map[common.Address]struct{}
stateObjects map[common.Address]*stateObject
stateObjectsDirty map[common.Address]struct{}
// DB error.
// State objects are used by the consensus core and VM which are
@ -83,14 +82,13 @@ func New(root common.Hash, db Database) (*StateDB, error) {
return nil, err
}
return &StateDB{
db: db,
trie: tr,
stateObjects: make(map[common.Address]*stateObject),
stateObjectsDirty: make(map[common.Address]struct{}),
stateObjectsDestructed: make(map[common.Address]struct{}),
refund: new(big.Int),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
db: db,
trie: tr,
stateObjects: make(map[common.Address]*stateObject),
stateObjectsDirty: make(map[common.Address]struct{}),
refund: new(big.Int),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
}, nil
}
@ -115,7 +113,6 @@ func (self *StateDB) Reset(root common.Hash) error {
self.trie = tr
self.stateObjects = make(map[common.Address]*stateObject)
self.stateObjectsDirty = make(map[common.Address]struct{})
self.stateObjectsDestructed = make(map[common.Address]struct{})
self.thash = common.Hash{}
self.bhash = common.Hash{}
self.txIndex = 0
@ -323,7 +320,6 @@ func (self *StateDB) Suicide(addr common.Address) bool {
})
stateObject.markSuicided()
stateObject.data.Balance = new(big.Int)
self.stateObjectsDestructed[addr] = struct{}{}
return true
}
@ -456,23 +452,19 @@ func (self *StateDB) Copy() *StateDB {
// Copy all the basic fields, initialize the memory ones
state := &StateDB{
db: self.db,
trie: self.trie,
stateObjects: make(map[common.Address]*stateObject, len(self.stateObjectsDirty)),
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
stateObjectsDestructed: make(map[common.Address]struct{}, len(self.stateObjectsDestructed)),
refund: new(big.Int).Set(self.refund),
logs: make(map[common.Hash][]*types.Log, len(self.logs)),
logSize: self.logSize,
preimages: make(map[common.Hash][]byte),
db: self.db,
trie: self.trie,
stateObjects: make(map[common.Address]*stateObject, len(self.stateObjectsDirty)),
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
refund: new(big.Int).Set(self.refund),
logs: make(map[common.Hash][]*types.Log, len(self.logs)),
logSize: self.logSize,
preimages: make(map[common.Hash][]byte),
}
// Copy the dirty states, logs, and preimages
for addr := range self.stateObjectsDirty {
state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state, state.MarkStateObjectDirty)
state.stateObjectsDirty[addr] = struct{}{}
if self.stateObjects[addr].suicided {
state.stateObjectsDestructed[addr] = struct{}{}
}
}
for hash, logs := range self.logs {
state.logs[hash] = make([]*types.Log, len(logs))
@ -520,10 +512,9 @@ func (self *StateDB) GetRefund() *big.Int {
return self.refund
}
// IntermediateRoot computes the current root hash of the state trie.
// It is called in between transactions to get the root hash that
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// Finalise finalises the state by removing the self destructed objects
// and clears the journal as well as the refunds.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
for addr := range s.stateObjectsDirty {
stateObject := s.stateObjects[addr]
if stateObject.suicided || (deleteEmptyObjects && stateObject.empty()) {
@ -535,6 +526,13 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
}
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()
}
// IntermediateRoot computes the current root hash of the state trie.
// It is called in between transactions to get the root hash that
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.Finalise(deleteEmptyObjects)
return s.trie.Hash()
}
@ -546,19 +544,6 @@ func (self *StateDB) Prepare(thash, bhash common.Hash, ti int) {
self.txIndex = ti
}
// Finalise finalises the state by removing the self destructed objects
// in the current stateObjectsDestructed buffer and clears the journal
// as well as the refunds.
//
// Please note that Finalise is used by EIP#98 and is used instead of
// IntermediateRoot.
func (s *StateDB) Finalise() {
for addr := range s.stateObjectsDestructed {
s.deleteStateObject(s.stateObjects[addr])
}
s.clearJournalAndRefund()
}
// DeleteSuicides flags the suicided objects for deletion so that they
// won't be referenced again when called / queried upon.
//

View File

@ -18,60 +18,24 @@ package state
import (
"bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
// StateSync is the main state synchronisation scheduler, which provides the yet
// unknown state hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the state database step by step until all is done.
type StateSync trie.TrieSync
// NewStateSync create a new state trie download scheduler.
func NewStateSync(root common.Hash, database trie.DatabaseReader) *StateSync {
func NewStateSync(root common.Hash, database trie.DatabaseReader) *trie.TrieSync {
var syncer *trie.TrieSync
callback := func(leaf []byte, parent common.Hash) error {
var obj struct {
Nonce uint64
Balance *big.Int
Root common.Hash
CodeHash []byte
}
var obj Account
if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
return err
}
syncer.AddSubTrie(obj.Root, 64, parent, nil)
syncer.AddRawEntry(common.BytesToHash(obj.CodeHash), 64, parent)
return nil
}
syncer = trie.NewTrieSync(root, database, callback)
return (*StateSync)(syncer)
}
// Missing retrieves the known missing nodes from the state trie for retrieval.
func (s *StateSync) Missing(max int) []common.Hash {
return (*trie.TrieSync)(s).Missing(max)
}
// Process injects a batch of retrieved trie nodes data, returning if something
// was committed to the memcache and also the index of an entry if processing of
// it failed.
func (s *StateSync) Process(list []trie.SyncResult) (bool, int, error) {
return (*trie.TrieSync)(s).Process(list)
}
// Commit flushes the data stored in the internal memcache out to persistent
// storage, returning the number of items written and any error that occurred.
func (s *StateSync) Commit(dbw trie.DatabaseWriter) (int, error) {
return (*trie.TrieSync)(s).Commit(dbw)
}
// Pending returns the number of state entries currently pending for download.
func (s *StateSync) Pending() int {
return (*trie.TrieSync)(s).Pending()
return syncer
}
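// Example (sketch, not part of the original file): driving the returned
// scheduler to completion. `fetch` is a stand-in for however the caller
// actually resolves trie nodes (network peers, another database, ...).
func exampleStateSync(root common.Hash, src trie.DatabaseReader, dst trie.DatabaseWriter,
	fetch func(common.Hash) []byte) error {

	sched := NewStateSync(root, src)
	for sched.Pending() > 0 {
		// Request a batch of missing nodes and resolve them out of band
		results := []trie.SyncResult{}
		for _, hash := range sched.Missing(16) {
			results = append(results, trie.SyncResult{Hash: hash, Data: fetch(hash)})
		}
		// Feed the data back in and flush finished nodes to persistent storage
		if _, _, err := sched.Process(results); err != nil {
			return err
		}
		if _, err := sched.Commit(dst); err != nil {
			return err
		}
	}
	return nil
}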

View File

@ -98,17 +98,23 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(context, statedb, config, cfg)
// Apply the transaction to the current state (included in the env)
_, gas, err := ApplyMessage(vmenv, msg, gp)
_, gas, failed, err := ApplyMessage(vmenv, msg, gp)
if err != nil {
return nil, nil, err
}
// Update the state with pending changes
var root []byte
if config.IsByzantium(header.Number) {
statedb.Finalise(true)
} else {
root = statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes()
}
usedGas.Add(usedGas, gas)
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx.
// Based on the EIP phase, we're passing whether the root touch-deletes accounts.
root := statedb.IntermediateRoot(config.IsEIP158(header.Number))
receipt := types.NewReceipt(root.Bytes(), usedGas)
receipt := types.NewReceipt(root, failed, usedGas)
receipt.TxHash = tx.Hash()
receipt.GasUsed = new(big.Int).Set(gas)
// if the transaction created a contract, store the creation address in the receipt.

View File

@ -18,7 +18,6 @@ package core
import (
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -59,8 +58,7 @@ type StateTransition struct {
value *big.Int
data []byte
state vm.StateDB
evm *vm.EVM
evm *vm.EVM
}
// Message represents a message sent to a contract.
@ -127,11 +125,11 @@ func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition
// the gas used (which includes gas refunds) and an error if it failed. An error always
// indicates a core error meaning that the message would always fail for that particular
// state and would never be accepted within a block.
func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) ([]byte, *big.Int, error) {
func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) ([]byte, *big.Int, bool, error) {
st := NewStateTransition(evm, msg, gp)
ret, _, gasUsed, err := st.TransitionDb()
return ret, gasUsed, err
ret, _, gasUsed, failed, err := st.TransitionDb()
return ret, gasUsed, failed, err
}
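Callers now receive a fourth value distinguishing consensus failures (err) from in-EVM failures (failed). A hypothetical caller-side adaptation (the helper name is illustrative, not part of this diff):

func applyAndReport(evm *vm.EVM, msg core.Message, gp *core.GasPool) ([]byte, *big.Int, error) {
	ret, gasUsed, failed, err := core.ApplyMessage(evm, msg, gp)
	if err != nil {
		// Consensus-level error: the message can never succeed in this state.
		return nil, nil, err
	}
	if failed {
		// EVM-level failure (revert, out of gas): gas was still consumed and
		// the transaction still belongs in a block; the receipt records it.
	}
	return ret, gasUsed, nil
}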
func (st *StateTransition) from() vm.AccountRef {
@ -198,8 +196,11 @@ func (st *StateTransition) preCheck() error {
// Make sure this transaction's nonce is correct
if msg.CheckNonce() {
if n := st.state.GetNonce(sender.Address()); n != msg.Nonce() {
return fmt.Errorf("invalid nonce: have %d, expected %d", msg.Nonce(), n)
nonce := st.state.GetNonce(sender.Address())
if nonce < msg.Nonce() {
return ErrNonceTooHigh
} else if nonce > msg.Nonce() {
return ErrNonceTooLow
}
}
return st.buyGas()
@ -208,7 +209,7 @@ func (st *StateTransition) preCheck() error {
// TransitionDb will transition the state by applying the current message and returning the result
// including the required gas for the operation as well as the used gas. It returns an error if it
// failed. An error indicates a consensus issue.
func (st *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big.Int, err error) {
func (st *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big.Int, failed bool, err error) {
if err = st.preCheck(); err != nil {
return
}
@ -222,10 +223,10 @@ func (st *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big
// TODO convert to uint64
intrinsicGas := IntrinsicGas(st.data, contractCreation, homestead)
if intrinsicGas.BitLen() > 64 {
return nil, nil, nil, vm.ErrOutOfGas
return nil, nil, nil, false, vm.ErrOutOfGas
}
if err = st.useGas(intrinsicGas.Uint64()); err != nil {
return nil, nil, nil, err
return nil, nil, nil, false, err
}
var (
@ -248,7 +249,7 @@ func (st *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big
// sufficient balance to make the transfer happen. The first
// balance transfer may never fail.
if vmerr == vm.ErrInsufficientBalance {
return nil, nil, nil, vmerr
return nil, nil, nil, false, vmerr
}
}
requiredGas = new(big.Int).Set(st.gasUsed())
@ -256,7 +257,7 @@ func (st *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big
st.refundGas()
st.state.AddBalance(st.evm.Coinbase, new(big.Int).Mul(st.gasUsed(), st.gasPrice))
return ret, requiredGas, st.gasUsed(), err
return ret, requiredGas, st.gasUsed(), vmerr != nil, err
}
func (st *StateTransition) refundGas() {

View File

@ -0,0 +1,150 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"errors"
"io"
"os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
// errNoActiveJournal is returned if an attempt is made to insert a transaction
// into the journal, but no journal file is currently open.
var errNoActiveJournal = errors.New("no active journal")
// txJournal is a rotating log of transactions with the aim of storing locally
// created transactions to allow non-executed ones to survive node restarts.
type txJournal struct {
path string // Filesystem path to store the transactions at
writer io.WriteCloser // Output stream to write new transactions into
}
// newTxJournal creates a new transaction journal to record local transactions
// at the given file system path.
func newTxJournal(path string) *txJournal {
return &txJournal{
path: path,
}
}
// load parses a transaction journal dump from disk, loading its contents into
// the specified pool.
func (journal *txJournal) load(add func(*types.Transaction) error) error {
// Skip the parsing if the journal file doesn't exist at all
if _, err := os.Stat(journal.path); os.IsNotExist(err) {
return nil
}
// Open the journal for loading any past transactions
input, err := os.Open(journal.path)
if err != nil {
return err
}
defer input.Close()
// Inject all transactions from the journal into the pool
stream := rlp.NewStream(input, 0)
total, dropped := 0, 0
var failure error
for {
// Parse the next transaction and terminate on error
tx := new(types.Transaction)
if err = stream.Decode(tx); err != nil {
if err != io.EOF {
failure = err
}
break
}
// Import the transaction and bump the appropriate progress counters
total++
if err = add(tx); err != nil {
log.Debug("Failed to add journaled transaction", "err", err)
dropped++
continue
}
}
log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped)
return failure
}
// insert adds the specified transaction to the local disk journal.
func (journal *txJournal) insert(tx *types.Transaction) error {
if journal.writer == nil {
return errNoActiveJournal
}
if err := rlp.Encode(journal.writer, tx); err != nil {
return err
}
return nil
}
// rotate regenerates the transaction journal based on the current contents of
// the transaction pool.
func (journal *txJournal) rotate(all map[common.Address]types.Transactions) error {
// Close the current journal (if any is open)
if journal.writer != nil {
if err := journal.writer.Close(); err != nil {
return err
}
journal.writer = nil
}
// Generate a new journal with the contents of the current pool
replacement, err := os.OpenFile(journal.path+".new", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
return err
}
journaled := 0
for _, txs := range all {
for _, tx := range txs {
if err = rlp.Encode(replacement, tx); err != nil {
replacement.Close()
return err
}
}
journaled += len(txs)
}
replacement.Close()
// Replace the live journal with the newly generated one
if err = os.Rename(journal.path+".new", journal.path); err != nil {
return err
}
sink, err := os.OpenFile(journal.path, os.O_WRONLY|os.O_APPEND, 0755)
if err != nil {
return err
}
journal.writer = sink
log.Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all))
return nil
}
// close flushes the transaction journal contents to disk and closes the file.
func (journal *txJournal) close() error {
var err error
if journal.writer != nil {
err = journal.writer.Close()
journal.writer = nil
}
return err
}
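A compact illustration of the journal life cycle, as it might appear in a test inside package core (txJournal is unexported); pool is assumed to be a running *TxPool and tx a signed *types.Transaction:

journal := newTxJournal("transactions.rlp")

// Replay previously journaled transactions into the pool.
if err := journal.load(pool.AddLocal); err != nil {
	log.Warn("Journal replay failed", "err", err)
}
// rotate opens the live writer, so insert only works after it.
if err := journal.rotate(pool.local()); err != nil {
	log.Warn("Journal rotation failed", "err", err)
}
if err := journal.insert(tx); err != nil {
	log.Warn("Journal insert failed", "err", err) // errNoActiveJournal before rotate
}
journal.close()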

View File

@ -298,6 +298,7 @@ func (l *txList) Filter(costLimit, gasLimit *big.Int) (types.Transactions, types
// If the list was strict, filter anything above the lowest nonce
var invalids types.Transactions
if l.strict && len(removed) > 0 {
lowest := uint64(math.MaxUint64)
for _, tx := range removed {
@ -435,6 +436,7 @@ func (l *txPricedList) Cap(threshold *big.Int, local *accountSet) types.Transact
}
// Stop the discards if we've reached the threshold
if tx.GasPrice().Cmp(threshold) >= 0 {
save = append(save, tx)
break
}
// Non stale transaction found, discard unless local

View File

@ -19,6 +19,7 @@ package core
import (
"errors"
"fmt"
"math"
"math/big"
"sort"
"sync"
@ -34,6 +35,13 @@ import (
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)
const (
// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
chainHeadChanSize = 10
// rmTxChanSize is the size of channel listening to RemovedTransactionEvent.
rmTxChanSize = 10
)
var (
// ErrInvalidSender is returned if the transaction contains an invalid signature.
ErrInvalidSender = errors.New("invalid sender")
@ -95,11 +103,21 @@ var (
underpricedTxCounter = metrics.NewCounter("txpool/underpriced")
)
type stateFn func() (*state.StateDB, error)
// blockChain provides the blockchain state and current gas limit to do some
// pre-checks in the tx pool, and serves event subscribers.
type blockChain interface {
CurrentBlock() *types.Block
GetBlock(hash common.Hash, number uint64) *types.Block
StateAt(root common.Hash) (*state.StateDB, error)
SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription
}
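In production *core.BlockChain satisfies this interface; for unit tests a small double is enough. A minimal sketch (type and field names are illustrative, not part of this diff):

type testBlockChain struct {
	statedb       *state.StateDB
	gasLimit      *big.Int
	chainHeadFeed *event.Feed
}

func (bc *testBlockChain) CurrentBlock() *types.Block {
	return types.NewBlock(&types.Header{GasLimit: new(big.Int).Set(bc.gasLimit)}, nil, nil, nil)
}

func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	return bc.CurrentBlock()
}

func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
	return bc.statedb, nil
}

func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.chainHeadFeed.Subscribe(ch)
}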
// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
NoLocals bool // Whether local transaction handling should be disabled
NoLocals bool // Whether local transaction handling should be disabled
Journal string // Journal of local transactions to survive node restarts
Rejournal time.Duration // Time interval to regenerate the local transaction journal
PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
@ -115,6 +133,9 @@ type TxPoolConfig struct {
// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
Journal: "transactions.rlp",
Rejournal: time.Hour,
PriceLimit: 1,
PriceBump: 10,
@ -130,6 +151,10 @@ var DefaultTxPoolConfig = TxPoolConfig{
// unreasonable or unworkable.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
conf := *config
if conf.Rejournal < time.Second {
log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
conf.Rejournal = time.Second
}
if conf.PriceLimit < 1 {
log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
@ -151,96 +176,116 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig {
type TxPool struct {
config TxPoolConfig
chainconfig *params.ChainConfig
currentState stateFn // The state function which will allow us to do some pre checks
pendingState *state.ManagedState
gasLimit func() *big.Int // The current gas limit function callback
chain blockChain
gasPrice *big.Int
eventMux *event.TypeMux
events *event.TypeMuxSubscription
locals *accountSet
txFeed event.Feed
scope event.SubscriptionScope
chainHeadCh chan ChainHeadEvent
chainHeadSub event.Subscription
signer types.Signer
mu sync.RWMutex
currentState *state.StateDB // Current state in the blockchain head
pendingState *state.ManagedState // Pending state tracking virtual nonces
currentMaxGas *big.Int // Current gas limit for transaction caps
locals *accountSet // Set of local transactions to exempt from eviction rules
journal *txJournal // Journal of local transactions to back up to disk
pending map[common.Address]*txList // All currently processable transactions
queue map[common.Address]*txList // Queued but non-processable transactions
beats map[common.Address]time.Time // Last heartbeat from each known account
all map[common.Hash]*types.Transaction // All transactions to allow lookups
priced *txPricedList // All transactions sorted by price
wg sync.WaitGroup // for shutdown sync
quit chan struct{}
wg sync.WaitGroup // for shutdown sync
homestead bool
}
// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
// Sanitize the input to ensure no vulnerable gas prices are set
config = (&config).sanitize()
// Create the transaction pool with its initial settings
pool := &TxPool{
config: config,
chainconfig: chainconfig,
signer: types.NewEIP155Signer(chainconfig.ChainId),
pending: make(map[common.Address]*txList),
queue: make(map[common.Address]*txList),
beats: make(map[common.Address]time.Time),
all: make(map[common.Hash]*types.Transaction),
eventMux: eventMux,
currentState: currentStateFn,
gasLimit: gasLimitFn,
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
pendingState: nil,
events: eventMux.Subscribe(ChainHeadEvent{}, RemovedTransactionEvent{}),
quit: make(chan struct{}),
config: config,
chainconfig: chainconfig,
chain: chain,
signer: types.NewEIP155Signer(chainconfig.ChainId),
pending: make(map[common.Address]*txList),
queue: make(map[common.Address]*txList),
beats: make(map[common.Address]time.Time),
all: make(map[common.Hash]*types.Transaction),
chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize),
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
}
pool.locals = newAccountSet(pool.signer)
pool.priced = newTxPricedList(&pool.all)
pool.resetState()
pool.reset(nil, chain.CurrentBlock().Header())
// Start the various events loops and return
pool.wg.Add(2)
go pool.eventLoop()
go pool.expirationLoop()
// If local transactions and journaling are enabled, load from disk
if !config.NoLocals && config.Journal != "" {
pool.journal = newTxJournal(config.Journal)
if err := pool.journal.load(pool.AddLocal); err != nil {
log.Warn("Failed to load transaction journal", "err", err)
}
if err := pool.journal.rotate(pool.local()); err != nil {
log.Warn("Failed to rotate transaction journal", "err", err)
}
}
// Subscribe events from blockchain
pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
// Start the event loop and return
pool.wg.Add(1)
go pool.loop()
return pool
}
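Illustrative wiring of a journaling pool against the new constructor (datadir and blockchain are assumptions: a data directory path and a *core.BlockChain, which satisfies the blockChain interface):

config := core.DefaultTxPoolConfig
config.Journal = filepath.Join(datadir, "transactions.rlp")
config.Rejournal = 15 * time.Minute

pool := core.NewTxPool(config, params.MainnetChainConfig, blockchain)
defer pool.Stop() // flushes and closes the journal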
func (pool *TxPool) eventLoop() {
// loop is the transaction pool's main event loop, waiting for and reacting to
// outside blockchain events as well as for various reporting and transaction
// eviction events.
func (pool *TxPool) loop() {
defer pool.wg.Done()
// Start a ticker and keep track of interesting pool stats to report
// Start the stats reporting and transaction eviction tickers
var prevPending, prevQueued, prevStales int
report := time.NewTicker(statsReportInterval)
defer report.Stop()
// Track chain events. When a chain event occurs (new canonical chain block)
// we need to know the new state. The new state will help us determine
// the nonces in the managed state.
evict := time.NewTicker(evictionInterval)
defer evict.Stop()
journal := time.NewTicker(pool.config.Rejournal)
defer journal.Stop()
// Track the previous head headers for transaction reorgs
head := pool.chain.CurrentBlock()
// Keep waiting for and reacting to the various events
for {
select {
// Handle any events fired by the system
case ev, ok := <-pool.events.Chan():
if !ok {
return
}
switch ev := ev.Data.(type) {
case ChainHeadEvent:
// Handle ChainHeadEvent
case ev := <-pool.chainHeadCh:
if ev.Block != nil {
pool.mu.Lock()
if ev.Block != nil {
if pool.chainconfig.IsHomestead(ev.Block.Number()) {
pool.homestead = true
}
if pool.chainconfig.IsHomestead(ev.Block.Number()) {
pool.homestead = true
}
pool.resetState()
pool.mu.Unlock()
pool.reset(head.Header(), ev.Block.Header())
head = ev.Block
case RemovedTransactionEvent:
pool.addTxs(ev.Txs, false)
pool.mu.Unlock()
}
// Exit when the subscription errors out because the system has stopped
case <-pool.chainHeadSub.Err():
return
// Handle stats reporting ticks
case <-report.C:
@ -253,23 +298,118 @@ func (pool *TxPool) eventLoop() {
log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
prevPending, prevQueued, prevStales = pending, queued, stales
}
// Handle inactive account transaction eviction
case <-evict.C:
pool.mu.Lock()
for addr := range pool.queue {
// Skip local transactions from the eviction mechanism
if pool.locals.contains(addr) {
continue
}
// Any non-locals old enough should be removed
if time.Since(pool.beats[addr]) > pool.config.Lifetime {
for _, tx := range pool.queue[addr].Flatten() {
pool.removeTx(tx.Hash())
}
}
}
pool.mu.Unlock()
// Handle local transaction journal rotation
case <-journal.C:
if pool.journal != nil {
pool.mu.Lock()
if err := pool.journal.rotate(pool.local()); err != nil {
log.Warn("Failed to rotate local tx journal", "err", err)
}
pool.mu.Unlock()
}
}
}
}
func (pool *TxPool) resetState() {
currentState, err := pool.currentState()
// lockedReset is a wrapper around reset to allow calling it in a thread safe
// manner. This method is only ever used in the tester!
func (pool *TxPool) lockedReset(oldHead, newHead *types.Header) {
pool.mu.Lock()
defer pool.mu.Unlock()
pool.reset(oldHead, newHead)
}
// reset retrieves the current state of the blockchain and ensures the content
// of the transaction pool is valid with regard to the chain state.
func (pool *TxPool) reset(oldHead, newHead *types.Header) {
// If we're reorging an old state, reinject all dropped transactions
var reinject types.Transactions
if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
// If the reorg is too deep, avoid doing it (will happen during fast sync)
oldNum := oldHead.Number.Uint64()
newNum := newHead.Number.Uint64()
if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
log.Warn("Skipping deep transaction reorg", "depth", depth)
} else {
// Reorg seems shallow enough to pull in all transactions into memory
var discarded, included types.Transactions
var (
rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
)
for rem.NumberU64() > add.NumberU64() {
discarded = append(discarded, rem.Transactions()...)
if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
return
}
}
for add.NumberU64() > rem.NumberU64() {
included = append(included, add.Transactions()...)
if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
return
}
}
for rem.Hash() != add.Hash() {
discarded = append(discarded, rem.Transactions()...)
if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
return
}
included = append(included, add.Transactions()...)
if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
return
}
}
reinject = types.TxDifference(discarded, included)
}
}
// Initialize the internal state to the current head
if newHead == nil {
newHead = pool.chain.CurrentBlock().Header() // Special case during testing
}
statedb, err := pool.chain.StateAt(newHead.Root)
if err != nil {
log.Error("Failed reset txpool state", "err", err)
log.Error("Failed to reset txpool state", "err", err)
return
}
pool.pendingState = state.ManageState(currentState)
pool.currentState = statedb
pool.pendingState = state.ManageState(statedb)
pool.currentMaxGas = newHead.GasLimit
// Inject any transactions discarded due to reorgs
log.Debug("Reinjecting stale transactions", "count", len(reinject))
pool.addTxsLocked(reinject, false)
// Validate the pool of pending transactions; this will remove any
// transactions that have been included in the block or have been
// invalidated by another transaction (e.g. one with a higher gas price).
pool.demoteUnexecutables(currentState)
pool.demoteUnexecutables()
// Update all accounts to the latest known pending nonce
for addr, list := range pool.pending {
@ -278,18 +418,30 @@ func (pool *TxPool) resetState() {
}
// Check the queue and move transactions over to the pending if possible
// or remove those that have become invalid
pool.promoteExecutables(currentState, nil)
pool.promoteExecutables(nil)
}
// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
pool.events.Unsubscribe()
close(pool.quit)
// Unsubscribe all subscriptions registered from txpool
pool.scope.Close()
// Unsubscribe subscriptions registered from blockchain
pool.chainHeadSub.Unsubscribe()
pool.wg.Wait()
if pool.journal != nil {
pool.journal.close()
}
log.Info("Transaction pool stopped")
}
// SubscribeTxPreEvent registers a subscription of TxPreEvent and
// starts sending events to the given channel.
func (pool *TxPool) SubscribeTxPreEvent(ch chan<- TxPreEvent) event.Subscription {
return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
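A sketch of a consumer of this feed-based API, which replaces the old eventMux.Subscribe(TxPreEvent{}) pattern (pool is assumed in scope):

ch := make(chan core.TxPreEvent, 64)
sub := pool.SubscribeTxPreEvent(ch)
defer sub.Unsubscribe()

for {
	select {
	case ev := <-ch:
		fmt.Println("new pending transaction:", ev.Tx.Hash().Hex())
	case err := <-sub.Err():
		// The subscription is torn down when the pool stops.
		fmt.Println("subscription closed:", err)
		return
	}
}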
// GasPrice returns the current gas price enforced by the transaction pool.
func (pool *TxPool) GasPrice() *big.Int {
pool.mu.RLock()
@ -345,8 +497,8 @@ func (pool *TxPool) stats() (int, int) {
// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and sorted by nonce.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
pool.mu.RLock()
defer pool.mu.RUnlock()
pool.mu.Lock()
defer pool.mu.Unlock()
pending := make(map[common.Address]types.Transactions)
for addr, list := range pool.pending {
@ -373,6 +525,22 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
return pending, nil
}
// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {
txs := make(map[common.Address]types.Transactions)
for addr := range pool.locals.accounts {
if pending := pool.pending[addr]; pending != nil {
txs[addr] = append(txs[addr], pending.Flatten()...)
}
if queued := pool.queue[addr]; queued != nil {
txs[addr] = append(txs[addr], queued.Flatten()...)
}
}
return txs
}
// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
@ -386,7 +554,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return ErrNegativeValue
}
// Ensure the transaction doesn't exceed the current block limit gas.
if pool.gasLimit().Cmp(tx.Gas()) < 0 {
if pool.currentMaxGas.Cmp(tx.Gas()) < 0 {
return ErrGasLimit
}
// Make sure the transaction is signed properly
@ -400,16 +568,12 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return ErrUnderpriced
}
// Ensure the transaction adheres to nonce ordering
currentState, err := pool.currentState()
if err != nil {
return err
}
if currentState.GetNonce(from) > tx.Nonce() {
if pool.currentState.GetNonce(from) > tx.Nonce() {
return ErrNonceTooLow
}
// Transactor should have enough funds to cover the costs
// cost == V + GP * GL
if currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
return ErrInsufficientFunds
}
intrGas := IntrinsicGas(tx.Data(), tx.To() == nil, pool.homestead)
@ -473,18 +637,22 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
}
pool.all[tx.Hash()] = tx
pool.priced.Put(tx)
pool.journalTx(from, tx)
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
return old != nil, nil
}
// New transaction isn't replacing a pending one, push into queue and potentially mark local
// New transaction isn't replacing a pending one, push into queue
replace, err := pool.enqueueTx(hash, tx)
if err != nil {
return false, err
}
// Mark local addresses and journal local transactions
if local {
pool.locals.add(from)
}
pool.journalTx(from, tx)
log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
return replace, nil
}
@ -515,6 +683,18 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er
return old != nil, nil
}
// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
// Only journal if it's enabled and the transaction is local
if pool.journal == nil || !pool.locals.contains(from) {
return
}
if err := pool.journal.insert(tx); err != nil {
log.Warn("Failed to journal local transaction", "err", err)
}
}
// promoteTx adds a transaction to the pending (processable) list of transactions.
//
// Note, this method assumes the pool lock is held!
@ -549,7 +729,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
// Set the potentially new pending nonce and notify any subsystems of the new tx
pool.beats[addr] = time.Now()
pool.pendingState.SetNonce(addr, tx.Nonce()+1)
go pool.eventMux.Post(TxPreEvent{tx})
go pool.txFeed.Send(TxPreEvent{tx})
}
// AddLocal enqueues a single transaction into the pool if it is valid, marking
@ -592,12 +772,8 @@ func (pool *TxPool) addTx(tx *types.Transaction, local bool) error {
}
// If we added a new transaction, run promotion checks and return
if !replace {
state, err := pool.currentState()
if err != nil {
return err
}
from, _ := types.Sender(pool.signer, tx) // already validated
pool.promoteExecutables(state, []common.Address{from})
pool.promoteExecutables([]common.Address{from})
}
return nil
}
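Local versus remote submission in brief: AddLocal marks the sender as local, exempting its transactions from the pool's price limit and eviction rules and journaling them to disk (unless NoLocals is set), while AddRemote applies the full heuristics. A sketch, with signedTx assumed in scope:

if err := pool.AddLocal(signedTx); err != nil {
	return err // e.g. ErrNonceTooLow, ErrInsufficientFunds, ErrGasLimit
}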
@ -607,6 +783,12 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) error {
pool.mu.Lock()
defer pool.mu.Unlock()
return pool.addTxsLocked(txs, local)
}
// addTxsLocked attempts to queue a batch of transactions if they are valid,
// whilst assuming the transaction pool lock is already held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) error {
// Add the batch of transaction, tracking the accepted ones
dirty := make(map[common.Address]struct{})
for _, tx := range txs {
@ -619,15 +801,11 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) error {
}
// Only reprocess the internal state if something was actually added
if len(dirty) > 0 {
state, err := pool.currentState()
if err != nil {
return err
}
addrs := make([]common.Address, 0, len(dirty))
for addr := range dirty {
addrs = append(addrs, addr)
}
pool.promoteExecutables(state, addrs)
pool.promoteExecutables(addrs)
}
return nil
}
@ -641,24 +819,6 @@ func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
return pool.all[hash]
}
// Remove removes the transaction with the given hash from the pool.
func (pool *TxPool) Remove(hash common.Hash) {
pool.mu.Lock()
defer pool.mu.Unlock()
pool.removeTx(hash)
}
// RemoveBatch removes all given transactions from the pool.
func (pool *TxPool) RemoveBatch(txs types.Transactions) {
pool.mu.Lock()
defer pool.mu.Unlock()
for _, tx := range txs {
pool.removeTx(tx.Hash())
}
}
// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
func (pool *TxPool) removeTx(hash common.Hash) {
@ -705,9 +865,7 @@ func (pool *TxPool) removeTx(hash common.Hash) {
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(state *state.StateDB, accounts []common.Address) {
gaslimit := pool.gasLimit()
func (pool *TxPool) promoteExecutables(accounts []common.Address) {
// Gather all the accounts potentially needing updates
if accounts == nil {
accounts = make([]common.Address, 0, len(pool.queue))
@ -722,14 +880,14 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB, accounts []common.A
continue // Just in case someone calls with a non existing account
}
// Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(state.GetNonce(addr)) {
for _, tx := range list.Forward(pool.currentState.GetNonce(addr)) {
hash := tx.Hash()
log.Trace("Removed old queued transaction", "hash", hash)
delete(pool.all, hash)
pool.priced.Removed()
}
// Drop all transactions that are too costly (low balance or out of gas)
drops, _ := list.Filter(state.GetBalance(addr), gaslimit)
drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
for _, tx := range drops {
hash := tx.Hash()
log.Trace("Removed unpayable queued transaction", "hash", hash)
@ -874,12 +1032,10 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB, accounts []common.A
// demoteUnexecutables removes invalid and processed transactions from the pool's
// executable/pending queue; any subsequent transactions that become unexecutable
// are moved back into the future queue.
func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
gaslimit := pool.gasLimit()
func (pool *TxPool) demoteUnexecutables() {
// Iterate over all accounts and demote any non-executable transactions
for addr, list := range pool.pending {
nonce := state.GetNonce(addr)
nonce := pool.currentState.GetNonce(addr)
// Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(nonce) {
@ -889,7 +1045,7 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
pool.priced.Removed()
}
// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
drops, invalids := list.Filter(state.GetBalance(addr), gaslimit)
drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
for _, tx := range drops {
hash := tx.Hash()
log.Trace("Removed unpayable pending transaction", "hash", hash)
@ -902,6 +1058,14 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
log.Trace("Demoting pending transaction", "hash", hash)
pool.enqueueTx(hash, tx)
}
// If there's a gap in front, warn (should never happen) and postpone all transactions
if list.Len() > 0 && list.txs.Get(nonce) == nil {
for _, tx := range list.Cap(0) {
hash := tx.Hash()
log.Error("Demoting invalidated transaction", "hash", hash)
pool.enqueueTx(hash, tx)
}
}
// Delete the entire queue entry if it became empty.
if list.Empty() {
delete(pool.pending, addr)
@ -910,39 +1074,6 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
}
}
// expirationLoop is a loop that periodically iterates over all accounts with
// queued transactions and drops all that have been inactive for a prolonged amount
// of time.
func (pool *TxPool) expirationLoop() {
defer pool.wg.Done()
evict := time.NewTicker(evictionInterval)
defer evict.Stop()
for {
select {
case <-evict.C:
pool.mu.Lock()
for addr := range pool.queue {
// Skip local transactions from the eviction mechanism
if pool.locals.contains(addr) {
continue
}
// Any non-locals old enough should be removed
if time.Since(pool.beats[addr]) > pool.config.Lifetime {
for _, tx := range pool.queue[addr].Flatten() {
pool.removeTx(tx.Hash())
}
}
}
pool.mu.Unlock()
case <-pool.quit:
return
}
}
}
// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
address common.Address
@ -955,7 +1086,7 @@ func (a addresssByHeartbeat) Len() int { return len(a) }
func (a addresssByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addresssByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// accountSet is simply a set of addresses to check for existance, and a signer
// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
accounts map[common.Address]struct{}

View File

@ -28,10 +28,16 @@ type bytesBacked interface {
Bytes() []byte
}
const bloomLength = 256
const (
// BloomByteLength represents the number of bytes used in a header log bloom.
BloomByteLength = 256
// Bloom represents a 256 bit bloom filter.
type Bloom [bloomLength]byte
// BloomBitLength represents the number of bits used in a header log bloom.
BloomBitLength = 8 * BloomByteLength
)
// Bloom represents a 2048 bit bloom filter.
type Bloom [BloomByteLength]byte
// BytesToBloom converts a byte slice to a bloom filter.
// It panics if b is not of suitable size.
@ -47,7 +53,7 @@ func (b *Bloom) SetBytes(d []byte) {
if len(b) < len(d) {
panic(fmt.Sprintf("bloom bytes too big %d %d", len(b), len(d)))
}
copy(b[bloomLength-len(d):], d)
copy(b[BloomByteLength-len(d):], d)
}
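A sketch of probing a header bloom for logs from a given address with the long-standing BloomLookup helper (header and addr assumed in scope); bloom filters can yield false positives but never false negatives:

if types.BloomLookup(header.Bloom, addr) {
	// The block may contain logs from addr: fetch the receipts and check.
}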
// Add adds d to the filter. Future calls of Test(d) will return true.

Some files were not shown because too many files have changed in this diff