Merge pull request #154 from farazdagi/feature/whisper-notifications-rebased
Whisper: Push Notifications (on Geth 1.6.1)
Commit 0b72544d8d
@@ -14,6 +14,7 @@
*/**/*tx_database*
*/**/*dapps*
vendor/github.com/ethereum/go-ethereum/vendor
/node_modules/

#*
.#*

Makefile

@@ -53,7 +53,9 @@ ci:
	build/env.sh go test -v -cover ./extkeys

generate:
	cp ./node_modules/web3/dist/web3.js ./static/scripts/web3.js
	build/env.sh go generate ./static
	rm ./static/scripts/web3.js

test:
	@build/env.sh echo "mode: set" > coverage-all.out

@@ -199,7 +199,7 @@ func DiscardTransactions(ids *C.char) *C.char {

//export GenerateConfig
func GenerateConfig(datadir *C.char, networkId C.int) *C.char {
	config, err := params.NewNodeConfig(C.GoString(datadir), int(networkId))
	config, err := params.NewNodeConfig(C.GoString(datadir), uint64(networkId))
	if err != nil {
		return makeJSONErrorResponse(err)
	}

@@ -59,7 +59,7 @@ var (
	}
	LogLevelFlag = cli.StringFlag{
		Name: "log",
		Usage: `Log level, one of: ""ERROR", "WARNING", "INFO", "DEBUG", and "DETAIL"`,
		Usage: `Log level, one of: ""ERROR", "WARNING", "INFO", "DEBUG", and "TRACE"`,
		Value: "INFO",
	}
)

@@ -121,7 +121,7 @@ func statusd(ctx *cli.Context) error {

// makeNodeConfig parses incoming CLI options and returns node configuration object
func makeNodeConfig(ctx *cli.Context) (*params.NodeConfig, error) {
	nodeConfig, err := params.NewNodeConfig(ctx.GlobalString(DataDirFlag.Name), ctx.GlobalInt(NetworkIdFlag.Name))
	nodeConfig, err := params.NewNodeConfig(ctx.GlobalString(DataDirFlag.Name), ctx.GlobalUint64(NetworkIdFlag.Name))
	if err != nil {
		return nil, err
	}

@@ -13,6 +13,7 @@ import (

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/les/status"
	gethparams "github.com/ethereum/go-ethereum/params"
	"github.com/status-im/status-go/geth"

@@ -111,7 +112,12 @@ func testGetDefaultConfig(t *testing.T) bool {
		return false
	}

	chainConfig := nodeConfig.ChainConfig
	genesis := new(core.Genesis)
	if err := json.Unmarshal([]byte(nodeConfig.LightEthConfig.Genesis), genesis); err != nil {
		t.Error(err)
		return false
	}
	chainConfig := genesis.Config
	if chainConfig.HomesteadBlock.Cmp(gethparams.MainNetHomesteadBlock) != 0 {
		t.Error("invalid chainConfig.HomesteadBlock")
		return false

@@ -153,7 +159,12 @@ func testGetDefaultConfig(t *testing.T) bool {
		return false
	}

	chainConfig = nodeConfig.ChainConfig
	genesis = new(core.Genesis)
	if err := json.Unmarshal([]byte(nodeConfig.LightEthConfig.Genesis), genesis); err != nil {
		t.Error(err)
		return false
	}
	chainConfig = genesis.Config
	refChainConfig := gethparams.TestnetChainConfig

	if chainConfig.HomesteadBlock.Cmp(refChainConfig.HomesteadBlock) != 0 {

@@ -229,7 +240,7 @@ func testStopResumeNode(t *testing.T) bool {
	t.Logf("account created: {address: %s, key: %s}", address1, pubKey1)

	// make sure that identity is not (yet injected)
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity already present in whisper")
	}

@@ -246,7 +257,7 @@ func testStopResumeNode(t *testing.T) bool {
		t.Errorf("could not select account: %v", err)
		return false
	}
	if !whisperService.HasIdentity(pubKey1) {
	if !whisperService.HasKeyPair(pubKey1) {
		t.Errorf("identity not injected into whisper: %v", err)
	}

@@ -295,7 +306,7 @@ func testStopResumeNode(t *testing.T) bool {
	if err != nil {
		t.Errorf("whisper service not running: %v", err)
	}
	if !whisperService.HasIdentity(pubKey1) {
	if !whisperService.HasKeyPair(pubKey1) {
		t.Errorf("identity evicted from whisper on node restart: %v", err)
	}

@@ -580,7 +591,7 @@ func testRecoverAccount(t *testing.T) bool {
	}

	// make sure that identity is not (yet injected)
	if whisperService.HasIdentity(pubKeyCheck) {
	if whisperService.HasKeyPair(pubKeyCheck) {
		t.Error("identity already present in whisper")
	}
	err = geth.SelectAccount(addressCheck, testConfig.Account1.Password)

@@ -588,7 +599,7 @@ func testRecoverAccount(t *testing.T) bool {
		t.Errorf("Test failed: could not select account: %v", err)
		return false
	}
	if !whisperService.HasIdentity(pubKeyCheck) {
	if !whisperService.HasKeyPair(pubKeyCheck) {
		t.Errorf("identity not injected into whisper: %v", err)
	}

@@ -618,7 +629,7 @@ func testAccountSelect(t *testing.T) bool {
	t.Logf("Account created: {address: %s, key: %s}", address2, pubKey2)

	// make sure that identity is not (yet injected)
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity already present in whisper")
	}

@@ -648,12 +659,12 @@ func testAccountSelect(t *testing.T) bool {
		t.Errorf("Test failed: could not select account: %v", err)
		return false
	}
	if !whisperService.HasIdentity(pubKey1) {
	if !whisperService.HasKeyPair(pubKey1) {
		t.Errorf("identity not injected into whisper: %v", err)
	}

	// select another account, make sure that previous account is wiped out from Whisper cache
	if whisperService.HasIdentity(pubKey2) {
	if whisperService.HasKeyPair(pubKey2) {
		t.Error("identity already present in whisper")
	}

@@ -669,10 +680,10 @@ func testAccountSelect(t *testing.T) bool {
		t.Errorf("Test failed: could not select account: %v", loginResponse.Error)
		return false
	}
	if !whisperService.HasIdentity(pubKey2) {
	if !whisperService.HasKeyPair(pubKey2) {
		t.Errorf("identity not injected into whisper: %v", err)
	}
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity should be removed, but it is still present in whisper")
	}

@@ -694,7 +705,7 @@ func testAccountLogout(t *testing.T) bool {
	}

	// make sure that identity doesn't exist (yet) in Whisper
	if whisperService.HasIdentity(pubKey) {
	if whisperService.HasKeyPair(pubKey) {
		t.Error("identity already present in whisper")
		return false
	}

@@ -705,7 +716,7 @@ func testAccountLogout(t *testing.T) bool {
		t.Errorf("Test failed: could not select account: %v", err)
		return false
	}
	if !whisperService.HasIdentity(pubKey) {
	if !whisperService.HasKeyPair(pubKey) {
		t.Error("identity not injected into whisper")
		return false
	}

@@ -724,7 +735,7 @@ func testAccountLogout(t *testing.T) bool {
	}

	// now, logout and check if identity is removed indeed
	if whisperService.HasIdentity(pubKey) {
	if whisperService.HasKeyPair(pubKey) {
		t.Error("identity not cleared from whisper")
		return false
	}

@@ -31,9 +31,13 @@ var (
		Name: "mailserver",
		Usage: "Delivers expired messages on demand",
	}
	WhisperPassword = cli.StringFlag{
	WhisperIdentityFile = cli.StringFlag{
		Name: "identity",
		Usage: "Protocol identity file (private key used for asymetric encryption)",
	}
	WhisperPasswordFile = cli.StringFlag{
		Name: "password",
		Usage: "Password, will be used for topic keys, as Mail & Notification Server password",
		Usage: "Password file (password is used for symmetric encryption)",
	}
	WhisperPortFlag = cli.IntFlag{
		Name: "port",

@@ -50,6 +54,14 @@ var (
		Usage: "Time to live for messages, in seconds",
		Value: params.WhisperTTL,
	}
	WhisperInjectTestAccounts = cli.BoolTFlag{
		Name: "injectaccounts",
		Usage: "Whether test account should be injected or not (default: true)",
	}
	FirebaseAuthorizationKey = cli.StringFlag{
		Name: "firebaseauth",
		Usage: "FCM Authorization Key used for sending Push Notifications",
	}
)

var (

@@ -63,10 +75,13 @@ var (
			WhisperNotificationServerNodeFlag,
			WhisperForwarderNodeFlag,
			WhisperMailserverNodeFlag,
			WhisperPassword,
			WhisperIdentityFile,
			WhisperPasswordFile,
			WhisperPoWFlag,
			WhisperPortFlag,
			WhisperTTLFlag,
			WhisperInjectTestAccounts,
			FirebaseAuthorizationKey,
		},
	}
)

@@ -80,20 +95,30 @@ func wnode(ctx *cli.Context) error {

	wnodePrintHeader(config)

	// inject test accounts
	geth.ImportTestAccount(filepath.Join(config.DataDir, "keystore"), "test-account1.pk")
	geth.ImportTestAccount(filepath.Join(config.DataDir, "keystore"), "test-account2.pk")
	// import test accounts
	if ctx.BoolT(WhisperInjectTestAccounts.Name) {
		geth.ImportTestAccount(filepath.Join(config.DataDir, "keystore"), "test-account1.pk")
		geth.ImportTestAccount(filepath.Join(config.DataDir, "keystore"), "test-account2.pk")
	}

	if err := geth.CreateAndRunNode(config); err != nil {
		return err
	}

	// inject test accounts into Whisper
	if ctx.BoolT(WhisperInjectTestAccounts.Name) {
		testConfig, _ := geth.LoadTestConfig()
		injectAccountIntoWhisper(testConfig.Account1.Address, testConfig.Account1.Password)
		injectAccountIntoWhisper(testConfig.Account2.Address, testConfig.Account2.Password)
	}

	// wait till node has been stopped
	geth.NodeManagerInstance().Node().GethStack().Wait()

	return nil
}

// wnodePrintHeader prints command header
func wnodePrintHeader(nodeConfig *params.NodeConfig) {
	fmt.Println("Starting Whisper/5 node..")

@@ -114,25 +139,81 @@ func makeWhisperNodeConfig(ctx *cli.Context) (*params.NodeConfig, error) {
	whisperConfig := nodeConfig.WhisperConfig

	whisperConfig.Enabled = true
	whisperConfig.IdentityFile = ctx.String(WhisperIdentityFile.Name)
	whisperConfig.PasswordFile = ctx.String(WhisperPasswordFile.Name)
	whisperConfig.EchoMode = ctx.BoolT(WhisperEchoModeFlag.Name)
	whisperConfig.BootstrapNode = ctx.BoolT(WhisperBootstrapNodeFlag.Name)
	whisperConfig.ForwarderNode = ctx.Bool(WhisperForwarderNodeFlag.Name)
	whisperConfig.NotificationServerNode = ctx.Bool(WhisperNotificationServerNodeFlag.Name)
	whisperConfig.MailServerNode = ctx.Bool(WhisperMailserverNodeFlag.Name)
	whisperConfig.MailServerPassword = ctx.String(WhisperPassword.Name)
	whisperConfig.NotificationServerPassword = ctx.String(WhisperPassword.Name) // the same for both mail and notification servers

	whisperConfig.Port = ctx.Int(WhisperPortFlag.Name)
	whisperConfig.TTL = ctx.Int(WhisperTTLFlag.Name)
	whisperConfig.MinimumPoW = ctx.Float64(WhisperPoWFlag.Name)

	if whisperConfig.MailServerNode && len(whisperConfig.MailServerPassword) == 0 {
	if whisperConfig.MailServerNode && len(whisperConfig.PasswordFile) == 0 {
		return nil, errors.New("mail server requires --password to be specified")
	}

	if whisperConfig.NotificationServerNode && len(whisperConfig.NotificationServerPassword) == 0 {
		return nil, errors.New("notification server requires --password to be specified")
	if whisperConfig.NotificationServerNode && len(whisperConfig.IdentityFile) == 0 {
		return nil, errors.New("notification server requires either --identity file to be specified")
	}

	if len(whisperConfig.PasswordFile) > 0 { // make sure that we can load password file
		if whisperConfig.PasswordFile, err = filepath.Abs(whisperConfig.PasswordFile); err != nil {
			return nil, err
		}
		if _, err := whisperConfig.ReadPasswordFile(); err != nil {
			return nil, err
		}
	}

	if len(whisperConfig.IdentityFile) > 0 { // make sure that we can load identity file
		if whisperConfig.IdentityFile, err = filepath.Abs(whisperConfig.IdentityFile); err != nil {
			return nil, err
		}
		if _, err := whisperConfig.ReadIdentityFile(); err != nil {
			return nil, err
		}
	}

	firebaseConfig := whisperConfig.FirebaseConfig
	firebaseConfig.AuthorizationKeyFile = ctx.String(FirebaseAuthorizationKey.Name)
	if len(firebaseConfig.AuthorizationKeyFile) > 0 { // make sure authorization key can be loaded
		if firebaseConfig.AuthorizationKeyFile, err = filepath.Abs(firebaseConfig.AuthorizationKeyFile); err != nil {
			return nil, err
		}
		if _, err := firebaseConfig.ReadAuthorizationKeyFile(); err != nil {
			return nil, err
		}
	}

	return nodeConfig, nil
}

// injectAccountIntoWhisper adds key pair into Whisper. Similar to Select/Login,
// but allows multiple accounts to be injected.
func injectAccountIntoWhisper(address, password string) error {
	nodeManager := geth.NodeManagerInstance()
	keyStore, err := nodeManager.AccountKeyStore()
	if err != nil {
		return err
	}

	account, err := geth.ParseAccountString(address)
	if err != nil {
		return geth.ErrAddressToAccountMappingFailure
	}

	account, accountKey, err := keyStore.AccountDecryptedKey(account, password)
	if err != nil {
		return fmt.Errorf("%s: %v", geth.ErrAccountToKeyMappingFailure.Error(), err)
	}

	whisperService, err := nodeManager.WhisperService()
	if err != nil {
		return err
	}
	whisperService.AddKeyPair(accountKey.PrivateKey)

	return nil
}

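Taken together, the new wnode flags (--identity, --password, --firebaseauth, --injectaccounts, --mailserver) end up on WhisperConfig and FirebaseConfig. The sketch below only restates that wiring programmatically, mirroring makeWhisperNodeConfig and wnode() above; the network id, data directory and file names are hypothetical placeholders, and nothing here goes beyond what the hunks already define.

package example

import (
	"github.com/status-im/status-go/geth"
	"github.com/status-im/status-go/geth/params"
)

// runNotificationNode is a sketch only: the programmatic equivalent of running
// wnode with --identity, --password and --firebaseauth. Paths are made up.
func runNotificationNode() error {
	config, err := params.NewNodeConfig("./wnode-data", 3) // example data dir and network id
	if err != nil {
		return err
	}

	whisperConfig := config.WhisperConfig
	whisperConfig.Enabled = true
	whisperConfig.NotificationServerNode = true // serve push notifications
	whisperConfig.IdentityFile = "./wnode-identity.pk"                            // --identity
	whisperConfig.PasswordFile = "./wnode-password.txt"                           // --password
	whisperConfig.FirebaseConfig.AuthorizationKeyFile = "./firebase-auth-key.txt" // --firebaseauth

	// fail early if the auxiliary files cannot be read (same checks as above)
	if _, err := whisperConfig.ReadIdentityFile(); err != nil {
		return err
	}
	if _, err := whisperConfig.ReadPasswordFile(); err != nil {
		return err
	}
	if _, err := whisperConfig.FirebaseConfig.ReadAuthorizationKeyFile(); err != nil {
		return err
	}

	if err := geth.CreateAndRunNode(config); err != nil {
		return err
	}
	geth.NodeManagerInstance().Node().GethStack().Wait() // block until the node stops
	return nil
}
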
@@ -149,7 +149,7 @@ func SelectAccount(address, password string) error {
		return err
	}

	if err := whisperService.InjectIdentity(accountKey.PrivateKey); err != nil {
	if err := whisperService.SelectKeyPair(accountKey.PrivateKey); err != nil {
		return ErrWhisperIdentityInjectionFailure
	}

@@ -181,7 +181,7 @@ func ReSelectAccount() error {
		return err
	}

	if err := whisperService.InjectIdentity(selectedAccount.AccountKey.PrivateKey); err != nil {
	if err := whisperService.SelectKeyPair(selectedAccount.AccountKey.PrivateKey); err != nil {
		return ErrWhisperIdentityInjectionFailure
	}

@@ -196,7 +196,7 @@ func Logout() error {
		return err
	}

	err = whisperService.ClearIdentities()
	err = whisperService.DeleteKeyPairs()
	if err != nil {
		return fmt.Errorf("%s: %v", ErrWhisperClearIdentitiesFailure, err)
	}

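The rename visible throughout these hunks (AddIdentity → AddKeyPair, HasIdentity → HasKeyPair, InjectIdentity → SelectKeyPair, ClearIdentities → DeleteKeyPairs) is the whole of the account/Whisper integration change for Whisper v5. The sketch below only collects those calls in one place; the signatures are inferred from their call sites in this diff (they come from the whisper/whisperv5 package as vendored by this project), so treat it as illustrative rather than authoritative.

package example

import (
	"crypto/ecdsa"
	"errors"

	whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
)

// keyPairLifecycle is a sketch of the key-pair calls used above, in the order
// SelectAccount/Logout exercise them. Signatures are assumed from the call sites.
func keyPairLifecycle(whisperService *whisper.Whisper, privateKey *ecdsa.PrivateKey, pubKeyHex string) error {
	whisperService.AddKeyPair(privateKey) // was AddIdentity

	if !whisperService.HasKeyPair(pubKeyHex) { // was HasIdentity
		return errors.New("key pair not injected into whisper")
	}

	// was InjectIdentity: make this key pair the selected account's identity
	if err := whisperService.SelectKeyPair(privateKey); err != nil {
		return err
	}

	// was ClearIdentities: drop all key pairs on logout
	return whisperService.DeleteKeyPairs()
}
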
@@ -287,7 +287,7 @@ func TestAccountsRecoverAccount(t *testing.T) {
	}

	// make sure that identity is not (yet injected)
	if whisperService.HasIdentity(pubKeyCheck) {
	if whisperService.HasKeyPair(pubKeyCheck) {
		t.Error("identity already present in whisper")
	}
	err = geth.SelectAccount(addressCheck, testConfig.Account1.Password)

@@ -295,7 +295,7 @@ func TestAccountsRecoverAccount(t *testing.T) {
		t.Errorf("Test failed: could not select account: %v", err)
		return
	}
	if !whisperService.HasIdentity(pubKeyCheck) {
	if !whisperService.HasKeyPair(pubKeyCheck) {
		t.Errorf("identity not injected into whisper: %v", err)
	}
}

@@ -330,7 +330,7 @@ func TestAccountSelect(t *testing.T) {
	t.Logf("Account created: {address: %s, key: %s}", address2, pubKey2)

	// make sure that identity is not (yet injected)
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity already present in whisper")
	}

@@ -345,12 +345,12 @@ func TestAccountSelect(t *testing.T) {
		t.Errorf("Test failed: could not select account: %v", err)
		return
	}
	if !whisperService.HasIdentity(pubKey1) {
	if !whisperService.HasKeyPair(pubKey1) {
		t.Errorf("identity not injected into whisper: %v", err)
	}

	// select another account, make sure that previous account is wiped out from Whisper cache
	if whisperService.HasIdentity(pubKey2) {
	if whisperService.HasKeyPair(pubKey2) {
		t.Error("identity already present in whisper")
	}
	err = geth.SelectAccount(address2, testConfig.Account1.Password)

@@ -358,10 +358,10 @@ func TestAccountSelect(t *testing.T) {
		t.Errorf("Test failed: could not select account: %v", err)
		return
	}
	if !whisperService.HasIdentity(pubKey2) {
	if !whisperService.HasKeyPair(pubKey2) {
		t.Errorf("identity not injected into whisper: %v", err)
	}
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity should be removed, but it is still present in whisper")
	}
}

@@ -387,7 +387,7 @@ func TestAccountsLogout(t *testing.T) {
	}

	// make sure that identity doesn't exist (yet) in Whisper
	if whisperService.HasIdentity(pubKey) {
	if whisperService.HasKeyPair(pubKey) {
		t.Error("identity already present in whisper")
	}

@@ -397,7 +397,7 @@ func TestAccountsLogout(t *testing.T) {
		t.Errorf("Test failed: could not select account: %v", err)
		return
	}
	if !whisperService.HasIdentity(pubKey) {
	if !whisperService.HasKeyPair(pubKey) {
		t.Error("identity not injected into whisper")
	}

@@ -407,7 +407,7 @@ func TestAccountsLogout(t *testing.T) {
	}

	// now, logout and check if identity is removed indeed
	if whisperService.HasIdentity(pubKey) {
	if whisperService.HasKeyPair(pubKey) {
		t.Error("identity not cleared from whisper")
	}
}

@@ -440,7 +440,7 @@ func TestAccountsSelectedAccountOnNodeRestart(t *testing.T) {
	t.Logf("account2 created: {address: %s, key: %s}", address2, pubKey2)

	// make sure that identity is not (yet injected)
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity already present in whisper")
	}

@@ -461,12 +461,12 @@ func TestAccountsSelectedAccountOnNodeRestart(t *testing.T) {
		t.Errorf("could not select account: %v", err)
		return
	}
	if !whisperService.HasIdentity(pubKey1) {
	if !whisperService.HasKeyPair(pubKey1) {
		t.Errorf("identity not injected into whisper: %v", err)
	}

	// select another account, make sure that previous account is wiped out from Whisper cache
	if whisperService.HasIdentity(pubKey2) {
	if whisperService.HasKeyPair(pubKey2) {
		t.Error("identity already present in whisper")
	}
	err = geth.SelectAccount(address2, testConfig.Account1.Password)

@@ -474,10 +474,10 @@ func TestAccountsSelectedAccountOnNodeRestart(t *testing.T) {
		t.Errorf("Test failed: could not select account: %v", err)
		return
	}
	if !whisperService.HasIdentity(pubKey2) {
	if !whisperService.HasKeyPair(pubKey2) {
		t.Errorf("identity not injected into whisper: %v", err)
	}
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity should be removed, but it is still present in whisper")
	}

@@ -518,10 +518,10 @@ func TestAccountsSelectedAccountOnNodeRestart(t *testing.T) {
	if err != nil {
		t.Errorf("whisper service not running: %v", err)
	}
	if !whisperService.HasIdentity(pubKey2) {
	if !whisperService.HasKeyPair(pubKey2) {
		t.Errorf("identity not injected into whisper: %v", err)
	}
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity should not be present, but it is still present in whisper")
	}
}

@@ -550,7 +550,7 @@ func TestAccountsNodeRestartWithNoSelectedAccount(t *testing.T) {
	t.Logf("account1 created: {address: %s, key: %s}", address1, pubKey1)

	// make sure that identity is not present
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity already present in whisper")
	}

@@ -589,7 +589,7 @@ func TestAccountsNodeRestartWithNoSelectedAccount(t *testing.T) {
	if err != nil {
		t.Errorf("whisper service not running: %v", err)
	}
	if whisperService.HasIdentity(pubKey1) {
	if whisperService.HasKeyPair(pubKey1) {
		t.Error("identity should not be present, but it is present in whisper")
	}
}

@@ -8,8 +8,7 @@ import (
	"time"

	"github.com/eapache/go-resiliency/semaphore"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/robertkrimen/otto"
	"github.com/status-im/status-go/geth"

@@ -323,7 +322,7 @@ func newResultResponse(call otto.FunctionCall, result interface{}) otto.Value {
func throwJSException(msg interface{}) otto.Value {
	val, err := otto.ToValue(msg)
	if err != nil {
		glog.V(logger.Error).Infof("Failed to serialize JavaScript exception %v: %v", msg, err)
		log.Error(fmt.Sprintf("Failed to serialize JavaScript exception %v: %v", msg, err))
	}
	panic(val)
}

@@ -783,8 +783,8 @@ func TestJailWhisper(t *testing.T) {
	}
	accountKey1Hex := common.ToHex(crypto.FromECDSAPub(&accountKey1.PrivateKey.PublicKey))

	whisperService.AddIdentity(accountKey1.PrivateKey)
	if ok, err := whisperAPI.HasIdentity(accountKey1Hex); err != nil || !ok {
	whisperService.AddKeyPair(accountKey1.PrivateKey)
	if ok, err := whisperAPI.HasKeyPair(accountKey1Hex); err != nil || !ok {
		t.Fatalf("identity not injected: %v", accountKey1Hex)
	}

@@ -795,8 +795,8 @@ func TestJailWhisper(t *testing.T) {
	}
	accountKey2Hex := common.ToHex(crypto.FromECDSAPub(&accountKey2.PrivateKey.PublicKey))

	whisperService.AddIdentity(accountKey2.PrivateKey)
	if ok, err := whisperAPI.HasIdentity(accountKey2Hex); err != nil || !ok {
	whisperService.AddKeyPair(accountKey2.PrivateKey)
	if ok, err := whisperAPI.HasKeyPair(accountKey2Hex); err != nil || !ok {
		t.Fatalf("identity not injected: %v", accountKey2Hex)
	}

@@ -838,32 +838,35 @@ func TestJailWhisper(t *testing.T) {
		"test 1: encrypted signed message from us (From != nil && To != nil)",
		`
			var identity1 = '` + accountKey1Hex + `';
			if (!web3.shh.hasIdentity(identity1)) {
			if (!web3.shh.hasKeyPair(identity1)) {
				throw 'idenitity "` + accountKey1Hex + `" not found in whisper';
			}

			var identity2 = '` + accountKey2Hex + `';
			if (!web3.shh.hasIdentity(identity2)) {
			if (!web3.shh.hasKeyPair(identity2)) {
				throw 'idenitity "` + accountKey2Hex + `" not found in whisper';
			}

			var topic = 'example1';
			var topic = makeTopic();
			var payload = '` + whisperMessage1 + `';

			// start watching for messages
			var filter = shh.filter({
				from: identity1,
				to: identity2,
				topics: [web3.fromAscii(topic)]
				type: "asym",
				sig: identity1,
				key: identity2,
				topics: [topic]
			});
			console.log(JSON.stringify(filter));

			// post message
			var message = {
				from: identity1,
				to: identity2,
				topics: [web3.fromAscii(topic)],
				payload: payload,
				ttl: 20,
				type: "asym",
				sig: identity1,
				key: identity2,
				topic: topic,
				payload: payload,
				ttl: 20,
			};
			var err = shh.post(message)
			if (err !== null) {

@@ -882,27 +885,29 @@ func TestJailWhisper(t *testing.T) {
		"test 2: encrypted signed message to yourself (From != nil && To != nil)",
		`
			var identity = '` + accountKey1Hex + `';
			if (!web3.shh.hasIdentity(identity)) {
			if (!web3.shh.hasKeyPair(identity)) {
				throw 'idenitity "` + accountKey1Hex + `" not found in whisper';
			}

			var topic = 'example2';
			var topic = makeTopic();
			var payload = '` + whisperMessage2 + `';

			// start watching for messages
			var filter = shh.filter({
				from: identity,
				to: identity,
				topics: [web3.fromAscii(topic)],
				type: "asym",
				sig: identity,
				key: identity,
				topics: [topic],
			});

			// post message
			var message = {
				from: identity,
				to: identity,
				topics: [web3.fromAscii(topic)],
				payload: payload,
				ttl: 20,
				type: "asym",
				sig: identity,
				key: identity,
				topic: topic,
				payload: payload,
				ttl: 20,
			};
			var err = shh.post(message)
			if (err !== null) {

@@ -921,33 +926,35 @@ func TestJailWhisper(t *testing.T) {
		"test 3: signed (known sender) broadcast (From != nil && To == nil)",
		`
			var identity = '` + accountKey1Hex + `';
			if (!web3.shh.hasIdentity(identity)) {
			if (!web3.shh.hasKeyPair(identity)) {
				throw 'idenitity "` + accountKey1Hex + `" not found in whisper';
			}

			var topic = 'example3';
			var topic = makeTopic();
			var payload = '` + whisperMessage3 + `';

			// generate symmetric key (if doesn't already exist)
			if (!shh.hasSymKey(topic)) {
				shh.addSymKey(topic, "0xdeadbeef"); // alternatively: shh.generateSymKey("example3");
				// to delete key, rely on: shh.deleteSymKey(topic);
			// generate symmetric key
			var keyid = shh.generateSymmetricKey();
			if (!shh.hasSymmetricKey(keyid)) {
				throw new Error('key not found');
			}

			// start watching for messages
			var filter = shh.filter({
				from: identity,
				topics: [web3.fromAscii(topic)],
				keyname: topic // you can use some other name for key too
				type: "sym",
				sig: identity,
				topics: [topic],
				key: keyid
			});

			// post message
			var message = {
				from: identity,
				topics: [web3.fromAscii(topic)],
				type: "sym",
				sig: identity,
				topic: topic,
				payload: payload,
				ttl: 20,
				keyname: topic
				key: keyid
			};
			var err = shh.post(message)
			if (err !== null) {

@@ -965,27 +972,29 @@ func TestJailWhisper(t *testing.T) {
	{
		"test 4: anonymous broadcast (From == nil && To == nil)",
		`
			var topic = 'example4';
			var topic = makeTopic();
			var payload = '` + whisperMessage4 + `';

			// generate symmetric key (if doesn't already exist)
			if (!shh.hasSymKey(topic)) {
				shh.addSymKey(topic, "0xdeadbeef"); // alternatively: shh.generateSymKey("example3");
				// to delete key, rely on: shh.deleteSymKey(topic);
			// generate symmetric key
			var keyid = shh.generateSymmetricKey();
			if (!shh.hasSymmetricKey(keyid)) {
				throw new Error('key not found');
			}

			// start watching for messages
			var filter = shh.filter({
				topics: [web3.fromAscii(topic)],
				keyname: topic // you can use some other name for key too
				type: "sym",
				topics: [topic],
				key: keyid
			});

			// post message
			var message = {
				topics: [web3.fromAscii(topic)],
				type: "sym",
				topic: topic,
				payload: payload,
				ttl: 20,
				keyname: topic
				key: keyid
			};
			var err = shh.post(message)
			if (err !== null) {

@@ -1004,23 +1013,25 @@ func TestJailWhisper(t *testing.T) {
		"test 5: encrypted anonymous message (From == nil && To != nil)",
		`
			var identity = '` + accountKey2Hex + `';
			if (!web3.shh.hasIdentity(identity)) {
			if (!web3.shh.hasKeyPair(identity)) {
				throw 'idenitity "` + accountKey2Hex + `" not found in whisper';
			}

			var topic = 'example5';
			var topic = makeTopic();
			var payload = '` + whisperMessage5 + `';

			// start watching for messages
			var filter = shh.filter({
				to: identity,
				topics: [web3.fromAscii(topic)],
				type: "asym",
				key: identity,
				topics: [topic],
			});

			// post message
			var message = {
				to: identity,
				topics: [web3.fromAscii(topic)],
				type: "asym",
				key: identity,
				topic: topic,
				payload: payload,
				ttl: 20
			};

@@ -1041,32 +1052,34 @@ func TestJailWhisper(t *testing.T) {
		"test 6: encrypted signed response to us (From != nil && To != nil)",
		`
			var identity1 = '` + accountKey1Hex + `';
			if (!web3.shh.hasIdentity(identity1)) {
			if (!web3.shh.hasKeyPair(identity1)) {
				throw 'idenitity "` + accountKey1Hex + `" not found in whisper';
			}

			var identity2 = '` + accountKey2Hex + `';
			if (!web3.shh.hasIdentity(identity2)) {
			if (!web3.shh.hasKeyPair(identity2)) {
				throw 'idenitity "` + accountKey2Hex + `" not found in whisper';
			}

			var topic = 'example6';
			var topic = makeTopic();
			var payload = '` + whisperMessage6 + `';

			// start watching for messages
			var filter = shh.filter({
				from: identity2,
				to: identity1,
				topics: [web3.fromAscii(topic)]
				type: "asym",
				sig: identity2,
				key: identity1,
				topics: [topic]
			});

			// post message
			var message = {
				from: identity2,
				to: identity1,
				topics: [web3.fromAscii(topic)],
				payload: payload,
				ttl: 20
				type: "asym",
				sig: identity2,
				key: identity1,
				topic: topic,
				payload: payload,
				ttl: 20
			};
			var err = shh.post(message)
			if (err !== null) {

@@ -1086,7 +1099,15 @@ func TestJailWhisper(t *testing.T) {
	for _, testCase := range testCases {
		t.Log(testCase.name)
		testCaseKey := crypto.Keccak256Hash([]byte(testCase.name)).Hex()
		jailInstance.Parse(testCaseKey, `var shh = web3.shh;`)
		jailInstance.Parse(testCaseKey, `
			var shh = web3.shh;
			var makeTopic = function () {
				var min = 1;
				var max = Math.pow(16, 8);
				var randInt = Math.floor(Math.random() * (max - min + 1)) + min;
				return web3.toHex(randInt);
			};
		`)
		vm, err := jailInstance.GetVM(testCaseKey)
		if err != nil {
			t.Errorf("cannot get VM: %v", err)

@@ -1127,7 +1148,7 @@ func TestJailWhisper(t *testing.T) {
	for testKey, filter := range installedFilters {
		if filter != "" {
			t.Logf("filter found: %v", filter)
			for _, message := range whisperAPI.GetFilterChanges(filter) {
			for _, message := range whisperAPI.GetSubscriptionMessages(filter) {
				t.Logf("message found: %s", common.FromHex(message.Payload))
				passedTests[testKey] = true
			}

geth/node.go

@@ -1,6 +1,7 @@
package geth

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"

@@ -8,21 +9,23 @@ import (
	"path"
	"path/filepath"
	"reflect"
	"runtime"
	"runtime/debug"
	"strings"
	"syscall"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/discv5"
	"github.com/ethereum/go-ethereum/p2p/nat"
	gethparams "github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/whisper/mailserver"
	"github.com/ethereum/go-ethereum/whisper/notifications"
	whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
	"github.com/status-im/status-go/geth/params"
)

@@ -69,8 +72,6 @@ func MakeNode(config *params.NodeConfig) *Node {
	}

	// setup logging
	glog.CopyStandardLogTo("INFO")
	glog.SetToStderr(true)
	if _, err := params.SetupLogger(config); err != nil {
		Fatalf(err)
	}

@@ -82,24 +83,48 @@ func MakeNode(config *params.NodeConfig) *Node {
		UseLightweightKDF: true,
		Name: config.Name,
		Version: config.Version,
		NoDiscovery: true,
		DiscoveryV5: false,
		DiscoveryV5Addr: ":0",
		BootstrapNodes: makeBootstrapNodes(),
		BootstrapNodesV5: makeBootstrapNodesV5(),
		ListenAddr: ":0",
		NAT: nat.Any(),
		MaxPeers: config.MaxPeers,
		MaxPendingPeers: config.MaxPendingPeers,
		IPCPath: makeIPCPath(config),
		HTTPHost: config.HTTPHost,
		HTTPPort: config.HTTPPort,
		HTTPCors: "*",
		HTTPModules: strings.Split(config.APIModules, ","),
		WSHost: makeWSHost(config),
		WSPort: config.WSPort,
		WSOrigins: "*",
		WSModules: strings.Split(config.APIModules, ","),
		P2P: p2p.Config{
			NoDiscovery: true,
			DiscoveryV5: false,
			DiscoveryV5Addr: ":0",
			BootstrapNodes: makeBootstrapNodes(),
			BootstrapNodesV5: makeBootstrapNodesV5(),
			ListenAddr: ":0",
			NAT: nat.Any(),
			MaxPeers: config.MaxPeers,
			MaxPendingPeers: config.MaxPendingPeers,
		},
		IPCPath: makeIPCPath(config),
		HTTPHost: config.HTTPHost,
		HTTPPort: config.HTTPPort,
		HTTPCors: []string{"*"},
		HTTPModules: strings.Split(config.APIModules, ","),
		WSHost: makeWSHost(config),
		WSPort: config.WSPort,
		WSOrigins: []string{"*"},
		WSModules: strings.Split(config.APIModules, ","),
	}

	if len(config.NodeKeyFile) > 0 {
		log.Info("Loading private key file", "file", config.NodeKeyFile)
		pk, err := crypto.LoadECDSA(config.NodeKeyFile)
		if err != nil {
			log.Warn(fmt.Sprintf("Failed loading private key file '%s': %v", config.NodeKeyFile, err))
		}

		// override node's private key
		stackConfig.P2P.PrivateKey = pk
	}

	if len(config.NodeKeyFile) > 0 {
		log.Info("Loading private key file", "file", config.NodeKeyFile)
		pk, err := crypto.LoadECDSA(config.NodeKeyFile)
		if err != nil {
			log.Info("Failed loading private key file", "file", config.NodeKeyFile, "err", err)
		}

		// override node's private key
		stackConfig.P2P.PrivateKey = pk
	}

	stack, err := node.New(stackConfig)

@@ -128,34 +153,27 @@ func MakeNode(config *params.NodeConfig) *Node {
// activateEthService configures and registers the eth.Ethereum service with a given node.
func activateEthService(stack *node.Node, config *params.NodeConfig) error {
	if !config.LightEthConfig.Enabled {
		glog.V(logger.Info).Infoln("LES protocol is disabled")
		log.Info("LES protocol is disabled")
		return nil
	}

	ethConf := &eth.Config{
		Etherbase: common.Address{},
		ChainConfig: makeChainConfig(config),
		FastSync: false,
		LightMode: true,
		MaxPeers: config.MaxPeers,
		DatabaseCache: config.LightEthConfig.DatabaseCache,
		DatabaseHandles: makeDatabaseHandles(),
		NetworkId: config.NetworkId,
		Genesis: config.LightEthConfig.Genesis,
		MinerThreads: runtime.NumCPU(),
		GasPrice: params.GasPrice,
		GpoMinGasPrice: params.GpoMinGasPrice,
		GpoMaxGasPrice: params.GpoMaxGasPrice,
		GpoFullBlockRatio: params.GpoFullBlockRatio,
		GpobaseStepDown: params.GpobaseStepDown,
		GpobaseStepUp: params.GpobaseStepUp,
		GpobaseCorrectionFactor: params.GpobaseCorrectionFactor,
		SolcPath: "solc",
		AutoDAG: false,
	var genesis *core.Genesis
	if config.LightEthConfig.Genesis != "" {
		genesis = new(core.Genesis)
		if err := json.Unmarshal([]byte(config.LightEthConfig.Genesis), genesis); err != nil {
			return fmt.Errorf("invalid genesis spec: %v", err)
		}
	}

	ethConf := eth.DefaultConfig
	ethConf.Genesis = genesis
	ethConf.SyncMode = downloader.LightSync
	ethConf.NetworkId = config.NetworkId
	ethConf.DatabaseCache = config.LightEthConfig.DatabaseCache
	ethConf.MaxPeers = config.MaxPeers
	ethConf.DatabaseHandles = makeDatabaseHandles()
	if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
		return les.New(ctx, ethConf)
		return les.New(ctx, &ethConf)
	}); err != nil {
		return fmt.Errorf("%v: %v", ErrLightEthRegistrationFailure, err)
	}

@@ -166,11 +184,34 @@ func activateEthService(stack *node.Node, config *params.NodeConfig) error {
// activateShhService configures Whisper and adds it to the given node.
func activateShhService(stack *node.Node, config *params.NodeConfig) error {
	if !config.WhisperConfig.Enabled {
		glog.V(logger.Info).Infoln("SHH protocol is disabled")
		log.Info("SHH protocol is disabled")
		return nil
	}
	serviceConstructor := func(*node.ServiceContext) (node.Service, error) {
		return whisper.New(), nil
		whisperConfig := config.WhisperConfig
		whisperService := whisper.New()

		// enable mail service
		if whisperConfig.MailServerNode {
			password, err := whisperConfig.ReadPasswordFile()
			if err != nil {
				return nil, err
			}

			var mailServer mailserver.WMailServer
			whisperService.RegisterServer(&mailServer)
			mailServer.Init(whisperService, whisperConfig.DataDir, string(password), whisperConfig.MinimumPoW)
		}

		// enable notification service
		if whisperConfig.NotificationServerNode {
			var notificationServer notifications.NotificationServer
			whisperService.RegisterNotificationServer(&notificationServer)

			notificationServer.Init(whisperService, whisperConfig)
		}

		return whisperService, nil
	}
	if err := stack.Register(serviceConstructor); err != nil {
		return err

@@ -197,29 +238,6 @@ func makeWSHost(config *params.NodeConfig) string {
	return config.WSHost
}

// makeChainConfig reads the chain configuration from the database in the datadir.
func makeChainConfig(config *params.NodeConfig) *gethparams.ChainConfig {
	chainConfig := new(gethparams.ChainConfig)

	// Homestead fork
	chainConfig.HomesteadBlock = config.HomesteadBlock

	// DAO fork
	chainConfig.DAOForkBlock = config.DAOForkBlock
	chainConfig.DAOForkSupport = config.DAOForkSupport

	// DoS reprice fork
	chainConfig.EIP150Block = config.EIP150Block
	chainConfig.EIP150Hash = config.EIP150Hash

	// DoS state cleanup fork
	chainConfig.EIP155Block = config.EIP155Block
	chainConfig.EIP158Block = config.EIP158Block
	chainConfig.ChainId = config.ChainId

	return chainConfig
}

// makeDatabaseHandles makes sure that enough file descriptors are available to the process
// (and returns half of them for node's database to use)
func makeDatabaseHandles() int {

@@ -304,7 +322,7 @@ func Fatalf(reason interface{}, args ...interface{}) {
// HaltOnPanic recovers from panic, logs issue, sends upward notification, and exits
func HaltOnPanic() {
	if r := recover(); r != nil {
		err := fmt.Errorf("%v: %v", ErrNodeStartFailure, r)
		err := fmt.Errorf("%v: %v", ErrNodeRunFailure, r)

		// send signal up to native app
		SendSignal(SignalEnvelope{

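With the hand-rolled ChainConfig gone, chain selection now travels as a JSON genesis spec inside LightEthConfig.Genesis; activateEthService above decodes it and hands an eth.Config to les.New. The sketch below only condenses that path for reference; it reuses the same calls the hunk itself makes (geth 1.6.x's eth.DefaultConfig and downloader.LightSync) and nothing else.

package example

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/eth/downloader"
)

// lesConfig is a sketch condensed from activateEthService: turn the JSON genesis
// spec carried in LightEthConfig.Genesis into an eth.Config suitable for les.New.
func lesConfig(genesisJSON string, networkID uint64) (*eth.Config, error) {
	var genesis *core.Genesis
	if genesisJSON != "" {
		genesis = new(core.Genesis)
		if err := json.Unmarshal([]byte(genesisJSON), genesis); err != nil {
			return nil, fmt.Errorf("invalid genesis spec: %v", err)
		}
	}

	ethConf := eth.DefaultConfig // start from the geth 1.6 defaults
	ethConf.Genesis = genesis    // nil leaves the client on its default chain
	ethConf.SyncMode = downloader.LightSync
	ethConf.NetworkId = networkID
	return &ethConf, nil
}
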
@@ -13,8 +13,7 @@ import (
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/rpc"

@@ -54,6 +53,7 @@ var (
	ErrInvalidJailedRequestQueue = errors.New("jailed request queue is not properly initialized")
	ErrNodeMakeFailure = errors.New("error creating p2p node")
	ErrNodeStartFailure = errors.New("error starting p2p node")
	ErrNodeRunFailure = errors.New("error running p2p node")
	ErrInvalidNodeAPI = errors.New("no node API connected")
	ErrAccountKeyStoreMissing = errors.New("account key store is not set")
)

@@ -105,13 +105,13 @@ func (m *NodeManager) RunNode() {
		m.StartNode()

		if _, err := m.AccountManager(); err != nil {
			glog.V(logger.Warn).Infoln(ErrInvalidAccountManager)
			log.Warn(ErrInvalidAccountManager.Error())
		}
		if err := m.node.geth.Service(&m.services.whisperService); err != nil {
			glog.V(logger.Warn).Infoln("cannot get whisper service:", err)
			log.Warn("cannot get whisper service", "error", err)
		}
		if err := m.node.geth.Service(&m.services.lightEthereum); err != nil {
			glog.V(logger.Warn).Infoln("cannot get light ethereum service:", err)
			log.Warn("cannot get light ethereum service", "error", err)
		}

		// setup handlers

@@ -124,7 +124,7 @@ func (m *NodeManager) RunNode() {
		var err error
		m.services.rpcClient, err = m.node.geth.Attach()
		if err != nil {
			glog.V(logger.Warn).Infoln("cannot get RPC client service:", ErrInvalidClient)
			log.Warn("cannot get RPC client service", "error", ErrInvalidClient)
		}

		// expose API

@@ -135,7 +135,7 @@ func (m *NodeManager) RunNode() {
		m.onNodeStarted() // node started, notify listeners
		m.node.geth.Wait()

		glog.V(logger.Info).Infoln("node stopped")
		log.Info("node stopped")
	}()
}

@@ -149,18 +149,24 @@ func (m *NodeManager) StartNode() {
		panic(fmt.Sprintf("%v: %v", ErrNodeStartFailure, err))
	}

	if server := m.node.geth.Server(); server != nil {
		if nodeInfo := server.NodeInfo(); nodeInfo != nil {
			log.Info(nodeInfo.Enode)
		}
	}

	// allow interrupting running nodes
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, os.Interrupt)
		defer signal.Stop(sigc)
		<-sigc
		glog.V(logger.Info).Infoln("Got interrupt, shutting down...")
		log.Info("Got interrupt, shutting down...")
		go m.node.geth.Stop()
		for i := 3; i > 0; i-- {
			<-sigc
			if i > 1 {
				glog.V(logger.Info).Infof("Already shutting down, interrupt %d more times for panic.", i-1)
				log.Info(fmt.Sprintf("Already shutting down, interrupt %d more times for panic.", i-1))
			}
		}
		panic("interrupted!")

@@ -173,7 +179,9 @@ func (m *NodeManager) StopNode() error {
		return ErrInvalidGethNode
	}

	m.node.geth.Stop()
	if err := m.node.geth.Stop(); err != nil {
		return err
	}
	m.node.started = make(chan struct{})
	return nil
}

@@ -222,10 +230,10 @@ func (m *NodeManager) ResetChainData() error {
	if _, err := os.Stat(chainDataDir); os.IsNotExist(err) {
		return err
	}

	if err := os.RemoveAll(chainDataDir); err != nil {
		return err
	}
	log.Info("chaindata removed", "dir", chainDataDir)

	if err := m.ResumeNode(); err != nil {
		return err

@@ -245,8 +253,9 @@ func (m *NodeManager) StartNodeRPCServer() (bool, error) {

	config := m.node.gethConfig
	modules := strings.Join(config.HTTPModules, ",")
	cors := strings.Join(config.HTTPCors, ",")

	return m.api.StartRPC(&config.HTTPHost, &config.HTTPPort, &config.HTTPCors, &modules)
	return m.api.StartRPC(&config.HTTPHost, &config.HTTPPort, &cors, &modules)
}

// StopNodeRPCServer stops HTTP RPC service attached to node

@ -1,19 +1,19 @@
|
|||
package params
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// default node configuration options
|
||||
|
@ -29,37 +29,13 @@ func init() {
|
|||
}
|
||||
|
||||
var (
|
||||
ErrMissingDataDir = errors.New("missing required 'DataDir' parameter")
|
||||
ErrMissingNetworkId = errors.New("missing required 'NetworkId' parameter")
|
||||
ErrMissingDataDir = errors.New("missing required 'DataDir' parameter")
|
||||
ErrMissingNetworkId = errors.New("missing required 'NetworkId' parameter")
|
||||
ErrEmptyPasswordFile = errors.New("password file cannot be empty")
|
||||
ErrEmptyIdentityFile = errors.New("identity file cannot be empty")
|
||||
ErrEmptyAuthorizationKeyFile = errors.New("authorization key file cannot be empty")
|
||||
)
|
||||
|
||||
// ChainConfig holds core blockchain settings. It is stored in the database on a per block basis.
|
||||
type ChainConfig struct {
|
||||
// ChainId identifies the current chain and is used for replay protection
|
||||
ChainId *big.Int
|
||||
|
||||
// HomesteadBlock is Homestead switch block (nil = no fork, 0 = already homestead)
|
||||
HomesteadBlock *big.Int
|
||||
|
||||
// DAOForkBlock TheDAO hard-fork switch block (nil = no fork)
|
||||
DAOForkBlock *big.Int
|
||||
|
||||
// DAOForkSupport Whether the nodes supports or opposes the DAO hard-fork
|
||||
DAOForkSupport bool
|
||||
|
||||
// EIP150Block is EIP150 HF block (nil = no fork)
|
||||
EIP150Block *big.Int
|
||||
|
||||
// EIP150Hash is EIP150 HF hash (fast sync aid)
|
||||
EIP150Hash common.Hash
|
||||
|
||||
// EIP155Block is EIP155 HF block
|
||||
EIP155Block *big.Int
|
||||
|
||||
// EIP158Block is EIP158 HF block
|
||||
EIP158Block *big.Int
|
||||
}
|
||||
|
||||
// LightEthConfig holds LES-related configuration
|
||||
// Status nodes are always lightweight clients (due to mobile platform constraints)
|
||||
type LightEthConfig struct {
|
||||
|
@ -73,11 +49,26 @@ type LightEthConfig struct {
|
|||
DatabaseCache int
|
||||
}
|
||||
|
||||
type FirebaseConfig struct {
|
||||
// AuthorizationKeyFile file path that contains FCM authorization key
|
||||
AuthorizationKeyFile string
|
||||
|
||||
// NotificationTriggerURL URL used to send push notification requests to
|
||||
NotificationTriggerURL string
|
||||
}
|
||||
|
||||
// WhisperConfig holds SHH-related configuration
|
||||
type WhisperConfig struct {
|
||||
// Enabled flag specifies whether protocol is enabled
|
||||
Enabled bool
|
||||
|
||||
// IdentityFile path to private key, that will be loaded as identity into Whisper
|
||||
IdentityFile string
|
||||
|
||||
// PasswordFile path to password file, for non-interactive password entry
|
||||
// (if no account file selected, then this password is used for symmetric encryption)
|
||||
PasswordFile string
|
||||
|
||||
// EchoMode if mode is on, prints some arguments for diagnostics
|
||||
EchoMode bool
|
||||
|
||||
|
@ -90,15 +81,9 @@ type WhisperConfig struct {
|
|||
// MailServerNode is mode when node is capable of delivering expired messages on demand
|
||||
MailServerNode bool
|
||||
|
||||
// MailServerPassword is password for MailServer's symmetric key
|
||||
MailServerPassword string
|
||||
|
||||
// NotificationServerNode is mode when node is capable of sending Push (and probably other kinds) Notifications
|
||||
NotificationServerNode bool
|
||||
|
||||
// NotificationServerPassword is password for NotificationServer's symmetric key (used in discovery)
|
||||
NotificationServerPassword string
|
||||
|
||||
// DataDir is the file system folder Whisper should use for any data storage needs.
|
||||
DataDir string
|
||||
|
||||
|
@ -110,6 +95,9 @@ type WhisperConfig struct {
|
|||
|
||||
// TTL time to live for messages, in seconds
|
||||
TTL int
|
||||
|
||||
// FirebaseConfig extra configuration for Firebase Cloud Messaging
|
||||
FirebaseConfig *FirebaseConfig `json:"FirebaseConfig,"`
|
||||
}
|
||||
|
||||
// SwarmConfig holds Swarm-related configuration
|
||||
|
@ -124,7 +112,7 @@ type NodeConfig struct {
|
|||
TestNet bool
|
||||
|
||||
// NetworkId sets network to use for selecting peers to connect to
|
||||
NetworkId int
|
||||
NetworkId uint64
|
||||
|
||||
// DataDir is the file system folder the node should use for any data storage needs.
|
||||
DataDir string
|
||||
|
@ -187,11 +175,11 @@ type NodeConfig struct {
|
|||
// LogFile is filename where exposed logs get written to
|
||||
LogFile string
|
||||
|
||||
// LogLevel defines minimum log level. Valid names are "ERROR", "WARNING", "INFO", "DEBUG", and "DETAIL".
|
||||
// LogLevel defines minimum log level. Valid names are "ERROR", "WARNING", "INFO", "DEBUG", and "TRACE".
|
||||
LogLevel string
|
||||
|
||||
// ChainConfig extra configuration for blockchain
|
||||
*ChainConfig `json:"ChainConfig,"`
|
||||
// LogToStderr defines whether logged info should also be output to os.Stderr
|
||||
LogToStderr bool
|
||||
|
||||
// LightEthConfig extra configuration for LES
|
||||
LightEthConfig *LightEthConfig `json:"LightEthConfig,"`
|
||||
|
@ -204,7 +192,7 @@ type NodeConfig struct {
|
|||
}
|
||||
|
||||
// NewNodeConfig creates new node configuration object
|
||||
func NewNodeConfig(dataDir string, networkId int) (*NodeConfig, error) {
|
||||
func NewNodeConfig(dataDir string, networkId uint64) (*NodeConfig, error) {
|
||||
nodeConfig := &NodeConfig{
|
||||
NetworkId: networkId,
|
||||
DataDir: dataDir,
|
||||
|
@ -220,7 +208,7 @@ func NewNodeConfig(dataDir string, networkId int) (*NodeConfig, error) {
|
|||
IPCFile: IPCFile,
|
||||
LogFile: LogFile,
|
||||
LogLevel: LogLevel,
|
||||
ChainConfig: &ChainConfig{},
|
||||
LogToStderr: LogToStderr,
|
||||
LightEthConfig: &LightEthConfig{
|
||||
Enabled: true,
|
||||
DatabaseCache: DatabaseCache,
|
||||
|
@ -230,19 +218,26 @@ func NewNodeConfig(dataDir string, networkId int) (*NodeConfig, error) {
|
|||
Port: WhisperPort,
|
||||
MinimumPoW: WhisperMinimumPoW,
|
||||
TTL: WhisperTTL,
|
||||
FirebaseConfig: &FirebaseConfig{
|
||||
NotificationTriggerURL: FirebaseNotificationTriggerURL,
|
||||
},
|
||||
},
|
||||
SwarmConfig: &SwarmConfig{},
|
||||
}
|
||||
|
||||
// auto-populate some dependent values
|
||||
nodeConfig.populateChainConfig()
|
||||
nodeConfig.populateDirs()
|
||||
if err := nodeConfig.populateGenesis(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := nodeConfig.populateDirs(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nodeConfig, nil
|
||||
}
|
||||
|
||||
// populateDirs updates directories that should be wrt to DataDir
|
||||
func (c *NodeConfig) populateDirs() {
|
||||
func (c *NodeConfig) populateDirs() error {
|
||||
makeSubDirPath := func(baseDir, subDir string) string {
|
||||
if len(baseDir) == 0 {
|
||||
return ""
|
||||
|
@ -257,51 +252,32 @@ func (c *NodeConfig) populateDirs() {
|
|||
if len(c.WhisperConfig.DataDir) == 0 {
|
||||
c.WhisperConfig.DataDir = makeSubDirPath(c.DataDir, WhisperDataDir)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// populateChainConfig does necessary adjustments to config object (depending on network node will be running on)
|
||||
func (c *NodeConfig) populateChainConfig() {
|
||||
// populateChainConfig does necessary adjustments to config object (depending on network node will be runnin on)
|
||||
func (c *NodeConfig) populateGenesis() error {
|
||||
c.TestNet = false
|
||||
if c.NetworkId == TestNetworkId {
|
||||
c.TestNet = true
|
||||
}
|
||||
|
||||
var genesis *core.Genesis
|
||||
if c.TestNet {
|
||||
// Homestead fork
|
||||
c.ChainConfig.HomesteadBlock = params.TestnetChainConfig.HomesteadBlock
|
||||
|
||||
// DAO fork
|
||||
c.ChainConfig.DAOForkBlock = params.TestnetChainConfig.DAOForkBlock
|
||||
c.ChainConfig.DAOForkSupport = params.TestnetChainConfig.DAOForkSupport
|
||||
|
||||
// DoS reprice fork
|
||||
c.ChainConfig.EIP150Block = params.TestnetChainConfig.EIP150Block
|
||||
c.ChainConfig.EIP150Hash = params.TestnetChainConfig.EIP150Hash
|
||||
|
||||
// DoS state cleanup fork
|
||||
c.ChainConfig.EIP155Block = params.TestnetChainConfig.EIP155Block
|
||||
c.ChainConfig.EIP158Block = params.TestnetChainConfig.EIP158Block
|
||||
c.ChainConfig.ChainId = params.TestnetChainConfig.ChainId
|
||||
|
||||
c.LightEthConfig.Genesis = core.DefaultTestnetGenesisBlock()
|
||||
genesis = core.DefaultTestnetGenesisBlock()
|
||||
} else {
|
||||
// Homestead fork
|
||||
c.ChainConfig.HomesteadBlock = params.MainNetHomesteadBlock
|
||||
// DAO fork
|
||||
c.ChainConfig.DAOForkBlock = params.MainNetDAOForkBlock
|
||||
c.ChainConfig.DAOForkSupport = true
|
||||
|
||||
// DoS reprice fork
|
||||
c.ChainConfig.EIP150Block = params.MainNetHomesteadGasRepriceBlock
|
||||
c.ChainConfig.EIP150Hash = params.MainNetHomesteadGasRepriceHash
|
||||
|
||||
// DoS state cleanup fork
|
||||
c.ChainConfig.EIP155Block = params.MainNetSpuriousDragon
|
||||
c.ChainConfig.EIP158Block = params.MainNetSpuriousDragon
|
||||
c.ChainConfig.ChainId = params.MainNetChainID
|
||||
|
||||
c.LightEthConfig.Genesis = core.DefaultGenesisBlock()
|
||||
genesis = core.DefaultGenesisBlock()
|
||||
}
|
||||
|
||||
// encode the genesis into JSON
|
||||
enc, err := json.Marshal(genesis)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.LightEthConfig.Genesis = string(enc)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadNodeConfig parses incoming JSON and returned it as Config
|
||||
|
@ -320,8 +296,12 @@ func LoadNodeConfig(configJSON string) (*NodeConfig, error) {
|
|||
}
|
||||
|
||||
// repopulate
|
||||
nodeConfig.populateChainConfig()
|
||||
nodeConfig.populateDirs()
|
||||
if err := nodeConfig.populateGenesis(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := nodeConfig.populateDirs(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(nodeConfig.DataDir) == 0 {
|
||||
return nil, ErrMissingDataDir
|
||||
|
@ -350,7 +330,7 @@ func (c *NodeConfig) Save() error {
|
|||
return err
|
||||
}
|
||||
|
||||
glog.V(logger.Info).Infof("config file saved: %v", configFilePath)
|
||||
log.Info(fmt.Sprintf("config file saved: %v", configFilePath))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -371,3 +351,59 @@ func (c *SwarmConfig) String() string {
|
|||
data, _ := json.MarshalIndent(c, "", " ")
|
||||
return string(data)
|
||||
}
|
||||
|
||||
// ReadPasswordFile reads and returns content of the password file
|
||||
func (c *WhisperConfig) ReadPasswordFile() ([]byte, error) {
|
||||
if len(c.PasswordFile) <= 0 {
|
||||
return nil, ErrEmptyPasswordFile
|
||||
}
|
||||
|
||||
password, err := ioutil.ReadFile(c.PasswordFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
password = bytes.TrimRight(password, "\n")
|
||||
|
||||
if len(password) == 0 {
|
||||
return nil, ErrEmptyPasswordFile
|
||||
}
|
||||
|
||||
return password, nil
|
||||
}
|
||||
|
||||
// ReadIdentityFile reads and loads identity private key
|
||||
func (c *WhisperConfig) ReadIdentityFile() (*ecdsa.PrivateKey, error) {
|
||||
if len(c.IdentityFile) <= 0 {
|
||||
return nil, ErrEmptyIdentityFile
|
||||
}
|
||||
|
||||
identity, err := crypto.LoadECDSA(c.IdentityFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if identity == nil {
|
||||
return nil, ErrEmptyIdentityFile
|
||||
}
|
||||
|
||||
return identity, nil
|
||||
}
|
||||
|
||||
// ReadAuthorizationKeyFile reads and loads FCM authorization key
|
||||
func (c *FirebaseConfig) ReadAuthorizationKeyFile() ([]byte, error) {
|
||||
if len(c.AuthorizationKeyFile) <= 0 {
|
||||
return nil, ErrEmptyAuthorizationKeyFile
|
||||
}
|
||||
|
||||
key, err := ioutil.ReadFile(c.AuthorizationKeyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = bytes.TrimRight(key, "\n")
|
||||
|
||||
if len(key) == 0 {
|
||||
return nil, ErrEmptyAuthorizationKeyFile
|
||||
}
|
||||
|
||||
return key, nil
|
||||
}
|
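The authorization key read here is what the notification sender attaches to FCM requests. A rough sketch of how such a key might be used against the FirebaseNotificationTriggerURL endpoint, assuming the legacy FCM HTTP API documented at firebase.google.com/docs/cloud-messaging/http-server-ref (the request wiring itself is not part of this diff; fc and deviceID are illustrative):

    // illustrative sketch, not part of this change
    package sketch

    import (
        "bytes"
        "net/http"

        "github.com/status-im/status-go/geth/params"
    )

    func sendTestNotification(fc *params.FirebaseConfig, deviceID string) error {
        key, err := fc.ReadAuthorizationKeyFile()
        if err != nil {
            return err
        }
        body := []byte(`{"to": "` + deviceID + `", "notification": {"title": "status.im notification", "body": "Hello!"}}`)
        req, err := http.NewRequest("POST", params.FirebaseNotificationTriggerURL, bytes.NewReader(body))
        if err != nil {
            return err
        }
        req.Header.Set("Authorization", "key="+string(key)) // legacy FCM server-key header
        req.Header.Set("Content-Type", "application/json")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        return resp.Body.Close()
    }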
||||
|
|
|
@ -1,12 +1,14 @@
|
|||
package params_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
gethparams "github.com/ethereum/go-ethereum/params"
|
||||
"github.com/status-im/status-go/geth"
|
||||
"github.com/status-im/status-go/geth/params"
|
||||
|
@ -190,7 +192,11 @@ var loadConfigTestCases = []struct {
|
|||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
chainConfig := nodeConfig.ChainConfig
|
||||
genesis := new(core.Genesis)
|
||||
if err := json.Unmarshal([]byte(nodeConfig.LightEthConfig.Genesis), genesis); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
chainConfig := genesis.Config
|
||||
refChainConfig := gethparams.TestnetChainConfig
|
||||
|
||||
if chainConfig.HomesteadBlock.Cmp(refChainConfig.HomesteadBlock) != 0 {
|
||||
|
@ -237,7 +243,11 @@ var loadConfigTestCases = []struct {
|
|||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
chainConfig := nodeConfig.ChainConfig
|
||||
genesis := new(core.Genesis)
|
||||
if err := json.Unmarshal([]byte(nodeConfig.LightEthConfig.Genesis), genesis); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
chainConfig := genesis.Config
|
||||
if chainConfig.HomesteadBlock.Cmp(gethparams.MainNetHomesteadBlock) != 0 {
|
||||
t.Fatal("invalid chainConfig.HomesteadBlock")
|
||||
}
|
||||
|
@ -272,10 +282,7 @@ var loadConfigTestCases = []struct {
|
|||
"Name": "TestStatusNode",
|
||||
"WSPort": 8546,
|
||||
"IPCEnabled": true,
|
||||
"WSEnabled": false,
|
||||
"ChainConfig": {
|
||||
"ChainId": 311
|
||||
}
|
||||
"WSEnabled": false
|
||||
}`,
|
||||
func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
|
||||
//nodeConfig.LightEthConfig.Genesis = nodeConfig.LightEthConfig.Genesis[:125]
|
||||
|
@ -284,16 +291,10 @@ var loadConfigTestCases = []struct {
|
|||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
networkId := 311
|
||||
|
||||
networkId := uint64(311)
|
||||
if nodeConfig.NetworkId != networkId {
|
||||
t.Fatalf("unexpected NetworkId, expected: %v, got: %v", networkId, nodeConfig.NetworkId)
|
||||
}
|
||||
|
||||
if nodeConfig.ChainId.Int64() != int64(networkId) {
|
||||
t.Fatalf("unexpected ChainConfig.ChainId, expected: %v, got: %v", networkId, nodeConfig.ChainId)
|
||||
}
|
||||
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -314,31 +315,36 @@ func TestLoadNodeConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConfigWriteRead(t *testing.T) {
|
||||
tmpDir, err := ioutil.TempDir(os.TempDir(), "geth-config-tests")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
configReadWrite := func(networkId uint64, refFile string) {
|
||||
tmpDir, err := ioutil.TempDir(os.TempDir(), "geth-config-tests")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
nodeConfig, err := params.NewNodeConfig(tmpDir, params.TestNetworkId)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create new config object: %v", err)
|
||||
nodeConfig, err := params.NewNodeConfig(tmpDir, networkId)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create new config object: %v", err)
|
||||
}
|
||||
|
||||
if err := nodeConfig.Save(); err != nil {
|
||||
t.Fatalf("cannot persist configuration: %v", err)
|
||||
}
|
||||
|
||||
loadedConfigData, err := ioutil.ReadFile(filepath.Join(nodeConfig.DataDir, "config.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot read configuration from disk: %v", err)
|
||||
}
|
||||
|
||||
refConfigData := geth.LoadFromFile(refFile)
|
||||
|
||||
refConfigData = strings.Replace(refConfigData, "$TMPDIR", nodeConfig.DataDir, -1)
|
||||
refConfigData = strings.Replace(refConfigData, "$VERSION", params.Version, -1)
|
||||
if string(loadedConfigData) != refConfigData {
|
||||
t.Fatalf("configuration mismatch,\nexpected: %v\ngot: %v", refConfigData, string(loadedConfigData))
|
||||
}
|
||||
}
|
||||
|
||||
if err := nodeConfig.Save(); err != nil {
|
||||
t.Fatalf("cannot persist configuration: %v", err)
|
||||
}
|
||||
|
||||
loadedConfigData, err := ioutil.ReadFile(filepath.Join(nodeConfig.DataDir, "config.json"))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot read configuration from disk: %v", err)
|
||||
}
|
||||
|
||||
refConfigData := geth.LoadFromFile("testdata/config.testnet.json")
|
||||
|
||||
refConfigData = strings.Replace(refConfigData, "$TMPDIR", nodeConfig.DataDir, -1)
|
||||
refConfigData = strings.Replace(refConfigData, "$VERSION", params.Version, -1)
|
||||
if string(loadedConfigData) != refConfigData {
|
||||
t.Fatalf("configuration mismatch,\nexpected: %v\ngot: %v", refConfigData, string(loadedConfigData))
|
||||
}
|
||||
configReadWrite(params.TestNetworkId, "testdata/config.testnet.json")
|
||||
configReadWrite(params.MainNetworkId, "testdata/config.mainnet.json")
|
||||
}
|
||||
|
|
|
@ -1,11 +1,5 @@
|
|||
package params
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
const (
|
||||
// ClientIdentifier is client identifier to advertise over the network
|
||||
ClientIdentifier = "StatusIM"
|
||||
|
@ -57,6 +51,9 @@ const (
|
|||
// LogLevel defines the minimum log level to report
|
||||
LogLevel = "INFO"
|
||||
|
||||
// LogToStderr defines whether logged info should also be output to os.Stderr
|
||||
LogToStderr = true
|
||||
|
||||
// WhisperDataDir is directory where Whisper data is stored, relative to DataDir
|
||||
WhisperDataDir = "wnode"
|
||||
|
||||
|
@ -69,17 +66,12 @@ const (
|
|||
// WhisperTTL is time to live for messages, in seconds
|
||||
WhisperTTL = 120
|
||||
|
||||
// FirebaseNotificationTriggerURL is URL where FCM notification requests are sent to
|
||||
FirebaseNotificationTriggerURL = "https://fcm.googleapis.com/fcm/send"
|
||||
|
||||
// MainNetworkId is id of the main network
|
||||
MainNetworkId = 1
|
||||
|
||||
// TestNetworkId is id of a test network
|
||||
TestNetworkId = 3
|
||||
)
|
||||
|
||||
// Gas price settings
|
||||
var (
|
||||
GasPrice = new(big.Int).Mul(big.NewInt(20), common.Shannon) // Minimal gas price to accept for mining a transactions
|
||||
GpoMinGasPrice = new(big.Int).Mul(big.NewInt(20), common.Shannon) // Minimum suggested gas price
|
||||
GpoMaxGasPrice = new(big.Int).Mul(big.NewInt(500), common.Shannon) // Maximum suggested gas price
|
||||
GpoFullBlockRatio = 80 // Full block threshold for gas price calculation (%)
|
||||
GpobaseStepDown = 10 // Suggested gas price base step down ratio (1/1000)
|
||||
GpobaseStepUp = 100 // Suggested gas price base step up ratio (1/1000)
|
||||
GpobaseCorrectionFactor = 110 // Suggested gas price base correction factor (%)
|
||||
)
|
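For orientation on the defaults above: common.Shannon is 10^9 wei (one Gwei), so 20 shannon is 20 Gwei. A tiny sketch:

    // illustrative sketch, not part of this change
    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/common"
    )

    func main() {
        gasPrice := new(big.Int).Mul(big.NewInt(20), common.Shannon)
        fmt.Println(gasPrice) // 20000000000 wei = 20 Gwei
    }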
||||
|
|
|
@ -1,145 +1,94 @@
|
|||
package params
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// Logger is wrapper for custom logging
|
||||
type Logger struct {
|
||||
sync.Mutex
|
||||
logFile *os.File
|
||||
observer chan string
|
||||
started chan struct{}
|
||||
stopped chan struct{}
|
||||
stopFlag bool
|
||||
origHandler log.Handler
|
||||
handler log.Handler
|
||||
config *NodeConfig
|
||||
}
|
||||
|
||||
var onceStartLogger sync.Once
|
||||
var (
|
||||
onceInitNodeLogger sync.Once
|
||||
nodeLoggerInstance *Logger
|
||||
)
|
||||
|
||||
// SetupLogger configs logger using parameters in config
|
||||
func SetupLogger(config *NodeConfig) (nodeLogger *Logger, err error) {
|
||||
func SetupLogger(config *NodeConfig) (*Logger, error) {
|
||||
if !config.LogEnabled {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
nodeLogger = &Logger{
|
||||
started: make(chan struct{}, 1),
|
||||
stopped: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
onceStartLogger.Do(func() {
|
||||
err = nodeLogger.createAndStartLogger(config)
|
||||
onceInitNodeLogger.Do(func() {
|
||||
nodeLoggerInstance = &Logger{
|
||||
config: config,
|
||||
origHandler: log.Root().GetHandler(),
|
||||
}
|
||||
nodeLoggerInstance.handler = nodeLoggerInstance.makeLogHandler(parseLogLevel(config.LogLevel))
|
||||
})
|
||||
|
||||
return
|
||||
if err := nodeLoggerInstance.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nodeLoggerInstance, nil
|
||||
}
|
||||
|
||||
// SetV allows to dynamically change log level of messages being written
|
||||
func (l *Logger) SetV(logLevel string) {
|
||||
glog.SetV(parseLogLevel(logLevel))
|
||||
log.Root().SetHandler(l.makeLogHandler(parseLogLevel(logLevel)))
|
||||
}
|
||||
|
||||
// Stop marks logger as stopped, forcing to relinquish hold
|
||||
// on os.Stderr and restore it back to the original
|
||||
func (l *Logger) Stop() (stopped chan struct{}) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
|
||||
l.stopFlag = true
|
||||
stopped = l.stopped
|
||||
return
|
||||
}
|
||||
|
||||
// Observe registers extra writer where logs should be written to.
|
||||
// This method is used in unit tests, and should NOT be relied upon otherwise.
|
||||
func (l *Logger) Observe(observer chan string) (started chan struct{}) {
|
||||
l.observer = observer
|
||||
started = l.started
|
||||
return
|
||||
}
|
||||
|
||||
// createAndStartLogger initializes and starts logger by replacing os.Stderr with custom writer.
|
||||
// Custom writer intercepts all requests to os.Stderr, then forwards to multiple readers, which
|
||||
// include log file and the original os.Stderr (so that logs output on screen as well)
|
||||
func (l *Logger) createAndStartLogger(config *NodeConfig) error {
|
||||
// customize glog
|
||||
glog.CopyStandardLogTo("INFO")
|
||||
glog.SetToStderr(true)
|
||||
glog.SetV(parseLogLevel(config.LogLevel))
|
||||
|
||||
// create log file
|
||||
logFile, err := os.OpenFile(filepath.Join(config.DataDir, config.LogFile), os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// inject reader to pipe all writes to Stderr
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// replace Stderr
|
||||
origStderr := os.Stderr
|
||||
os.Stderr = w
|
||||
scanner := bufio.NewScanner(r)
|
||||
|
||||
// configure writer, send to the original os.Stderr and log file
|
||||
logWriter := io.MultiWriter(origStderr, logFile)
|
||||
|
||||
go func() {
|
||||
defer func() { // restore original Stderr
|
||||
os.Stderr = origStderr
|
||||
logFile.Close()
|
||||
close(l.stopped)
|
||||
}()
|
||||
|
||||
// notify observer that it can start polling (unit test, normally)
|
||||
close(l.started)
|
||||
|
||||
for scanner.Scan() {
|
||||
fmt.Fprintln(logWriter, scanner.Text())
|
||||
|
||||
if l.observer != nil {
|
||||
l.observer <- scanner.Text()
|
||||
}
|
||||
|
||||
// allow to restore original os.Stderr if logger is stopped
|
||||
if l.stopFlag {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
fmt.Fprintf(origStderr, "error reading logs: %v\n", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Start installs logger handler
|
||||
func (l *Logger) Start() error {
|
||||
log.Root().SetHandler(l.handler)
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseLogLevel parses string and returns logger.* constant
|
||||
func parseLogLevel(logLevel string) int {
|
||||
switch logLevel {
|
||||
case "ERROR":
|
||||
return logger.Error
|
||||
case "WARNING":
|
||||
return logger.Warn
|
||||
case "INFO":
|
||||
return logger.Info
|
||||
case "DEBUG":
|
||||
return logger.Debug
|
||||
case "DETAIL":
|
||||
return logger.Detail
|
||||
// Stop replaces our handler back to the original log handler
|
||||
func (l *Logger) Stop() error {
|
||||
log.Root().SetHandler(l.origHandler)
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeLogHandler creates a log handler for a given level and node configuration
|
||||
func (l *Logger) makeLogHandler(lvl log.Lvl) log.Handler {
|
||||
var handler log.Handler
|
||||
logFilePath := filepath.Join(l.config.DataDir, l.config.LogFile)
|
||||
fileHandler := log.Must.FileHandler(logFilePath, log.LogfmtFormat())
|
||||
stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
|
||||
if l.config.LogToStderr {
|
||||
handler = log.MultiHandler(
|
||||
log.LvlFilterHandler(lvl, log.CallerFileHandler(log.CallerFuncHandler(stderrHandler))),
|
||||
log.LvlFilterHandler(lvl, fileHandler))
|
||||
} else {
|
||||
handler = log.LvlFilterHandler(lvl, fileHandler)
|
||||
}
|
||||
|
||||
return logger.Info
|
||||
return handler
|
||||
}
|
||||
|
||||
// parseLogLevel parses string and returns logger.* constant
|
||||
func parseLogLevel(logLevel string) log.Lvl {
|
||||
switch logLevel {
|
||||
case "ERROR":
|
||||
return log.LvlError
|
||||
case "WARNING":
|
||||
return log.LvlWarn
|
||||
case "INFO":
|
||||
return log.LvlInfo
|
||||
case "DEBUG":
|
||||
return log.LvlDebug
|
||||
case "TRACE":
|
||||
return log.LvlTrace
|
||||
}
|
||||
|
||||
return log.LvlInfo
|
||||
}
|
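The logger is now a thin wrapper around geth's log handlers instead of the old glog/os.Stderr interception scheme. A minimal usage sketch, mirroring the updated test (the datadir value is illustrative):

    // illustrative sketch, not part of this change
    package main

    import (
        "os"

        "github.com/ethereum/go-ethereum/log"
        "github.com/status-im/status-go/geth/params"
    )

    func main() {
        if err := os.MkdirAll("/tmp/status-data", 0700); err != nil {
            panic(err)
        }
        nodeConfig, err := params.NewNodeConfig("/tmp/status-data", params.TestNetworkId)
        if err != nil {
            panic(err)
        }
        nodeConfig.LogEnabled = true
        nodeConfig.LogToStderr = false // capture logs to file only

        nodeLogger, err := params.SetupLogger(nodeConfig)
        if err != nil || nodeLogger == nil {
            panic("cannot create logger")
        }

        log.Info("hello from the wrapped geth logger")
        nodeLogger.SetV("DEBUG") // raise verbosity at runtime
        log.Debug("now visible at DEBUG level")

        nodeLogger.Stop() // restore the original root handler
    }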
||||
|
|
|
@ -1,16 +1,13 @@
|
|||
package params_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/status-im/status-go/geth/params"
|
||||
)
|
||||
|
||||
|
@ -19,7 +16,7 @@ func TestLogger(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
//defer os.RemoveAll(tmpDir)
|
||||
|
||||
nodeConfig, err := params.NewNodeConfig(tmpDir, params.TestNetworkId)
|
||||
if err != nil {
|
||||
|
@ -34,6 +31,7 @@ func TestLogger(t *testing.T) {
|
|||
}
|
||||
|
||||
nodeConfig.LogEnabled = true
|
||||
nodeConfig.LogToStderr = false // just capture logs to file
|
||||
nodeLogger, err = params.SetupLogger(nodeConfig)
|
||||
if err != nil {
|
||||
t.Fatal("cannot create logger object")
|
||||
|
@ -42,104 +40,41 @@ func TestLogger(t *testing.T) {
|
|||
t.Fatal("logger is empty (while logs are enabled)")
|
||||
}
|
||||
|
||||
logReader := make(chan string, 100)
|
||||
loggerStarted := nodeLogger.Observe(logReader)
|
||||
<-loggerStarted // allow logger to setup itself
|
||||
|
||||
expectedLogTextInLogFile := "" // aggregate log contents accross all tests
|
||||
validateLoggerObserverText := func(observer chan string, expectedLogText string) {
|
||||
logText := ""
|
||||
|
||||
select {
|
||||
case logText = <-observer:
|
||||
expectedLogTextInLogFile += logText + "\n"
|
||||
logText = logText[len(logText)-len(expectedLogText):] // as logs can be prepended with glog info
|
||||
case <-time.After(3 * time.Second):
|
||||
validateLogText := func(expectedLogText string) {
|
||||
logFilePath := filepath.Join(nodeConfig.DataDir, nodeConfig.LogFile)
|
||||
logBytes, err := ioutil.ReadFile(logFilePath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
logText := string(logBytes)
|
||||
logText = strings.Trim(logText, "\n")
|
||||
logText = logText[len(logText)-len(expectedLogText):] // as logs can be prepended with log info
|
||||
|
||||
if logText != expectedLogText {
|
||||
t.Fatalf("invalid log, expected: %#v, got: %#v", expectedLogText, logText)
|
||||
if expectedLogText != logText {
|
||||
t.Fatalf("invalid log, expected: [%s], got: [%s]", expectedLogText, string(logText))
|
||||
} else {
|
||||
t.Logf("log match found, expected: [%s], got: [%s]", expectedLogText, string(logText))
|
||||
}
|
||||
}
|
||||
|
||||
loggerTestCases := []struct {
|
||||
name string
|
||||
log func()
|
||||
validate func()
|
||||
}{
|
||||
{
|
||||
"log using standard log package",
|
||||
func() {
|
||||
log.Println("use standard log package")
|
||||
},
|
||||
func() {
|
||||
validateLoggerObserverText(logReader, "use standard log package")
|
||||
},
|
||||
},
|
||||
{
|
||||
"log using standard glog package",
|
||||
func() {
|
||||
glog.V(logger.Info).Infoln("use glog package")
|
||||
},
|
||||
func() {
|
||||
validateLoggerObserverText(logReader, "use glog package")
|
||||
},
|
||||
},
|
||||
{
|
||||
"log using os.Stderr (write directly to it)",
|
||||
func() {
|
||||
fmt.Fprintln(os.Stderr, "use os.Stderr package")
|
||||
},
|
||||
func() {
|
||||
validateLoggerObserverText(logReader, "use os.Stderr package")
|
||||
},
|
||||
},
|
||||
{
|
||||
"log using DEBUG log level (with appropriate level set)",
|
||||
func() {
|
||||
nodeLogger.SetV("DEBUG")
|
||||
glog.V(logger.Debug).Info("logged DEBUG log level message")
|
||||
},
|
||||
func() {
|
||||
validateLoggerObserverText(logReader, "logged DEBUG log level message")
|
||||
},
|
||||
},
|
||||
{
|
||||
"log using DEBUG log level (with appropriate level NOT set)",
|
||||
func() {
|
||||
nodeLogger.SetV("INFO")
|
||||
glog.V(logger.Info).Info("logged INFO log level message")
|
||||
glog.V(logger.Debug).Info("logged DEBUG log level message")
|
||||
},
|
||||
func() {
|
||||
validateLoggerObserverText(logReader, "logged INFO log level message")
|
||||
},
|
||||
},
|
||||
}
|
||||
// sample log message
|
||||
log.Info("use log package")
|
||||
validateLogText(`msg="use log package"`)
|
||||
|
||||
for _, testCase := range loggerTestCases {
|
||||
t.Log("test: " + testCase.name)
|
||||
testCase.log()
|
||||
testCase.validate()
|
||||
}
|
||||
// log using DEBUG log level (with appropriate level set)
|
||||
nodeLogger.SetV("DEBUG")
|
||||
log.Info("logged DEBUG log level message")
|
||||
validateLogText(`msg="logged DEBUG log level message"`)
|
||||
|
||||
logFileContents, err := ioutil.ReadFile(filepath.Join(tmpDir, nodeConfig.LogFile))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot read logs file: %v", err)
|
||||
}
|
||||
if string(logFileContents) != expectedLogTextInLogFile {
|
||||
t.Fatalf("wrong content of log file, expected:\n%v\ngot:\n%v", expectedLogTextInLogFile, string(logFileContents))
|
||||
}
|
||||
// log using DEBUG log level (with appropriate level NOT set)
|
||||
nodeLogger.SetV("INFO")
|
||||
log.Info("logged INFO log level message")
|
||||
validateLogText(`msg="logged INFO log level message"`)
|
||||
log.Debug("logged DEBUG log level message")
|
||||
validateLogText(`msg="logged INFO log level message"`) // debug level message is NOT logged
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 10; i++ {
|
||||
glog.Infoln("logging message: ", i)
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
// stop logger and see if os.Stderr and gethlog continue functioning
|
||||
nodeLogger.Stop()
|
||||
|
||||
// stop logger and see if os.Stderr and glog continue functioning
|
||||
<-nodeLogger.Stop()
|
||||
|
||||
glog.Infoln("logging message: this message happens after custom logger has been stopped")
|
||||
log.Info("logging message: this message happens after custom logger has been stopped")
|
||||
}
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -20,8 +20,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/status-im/status-go/geth/params"
|
||||
"github.com/status-im/status-go/static"
|
||||
)
|
||||
|
@ -59,7 +58,7 @@ func SetDefaultNodeNotificationHandler(fn NodeNotificationHandler) {
|
|||
|
||||
// TriggerDefaultNodeNotificationHandler triggers default notification handler (helpful in tests)
|
||||
func TriggerDefaultNodeNotificationHandler(jsonEvent string) {
|
||||
glog.V(logger.Info).Infof("notification received (default notification handler): %s\n", jsonEvent)
|
||||
log.Info("notification received (default notification handler)", "event", jsonEvent)
|
||||
}
|
||||
|
||||
// SendSignal sends application signal (JSON, normally) upwards to application (via default notification handler)
|
||||
|
@ -164,7 +163,7 @@ func PrepareTestNode() (err error) {
|
|||
|
||||
// prepare node directory
|
||||
if err := os.MkdirAll(filepath.Join(TestDataDir, "keystore"), os.ModePerm); err != nil {
|
||||
glog.V(logger.Warn).Infoln("make node failed:", err)
|
||||
log.Warn("make node failed", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -206,7 +205,7 @@ func PrepareTestNode() (err error) {
|
|||
}
|
||||
|
||||
if syncRequired {
|
||||
glog.V(logger.Warn).Infof("Sync is required, it will take %d seconds", testConfig.Node.SyncSeconds)
|
||||
log.Warn("Sync is required", "duration", testConfig.Node.SyncSeconds)
|
||||
time.Sleep(testConfig.Node.SyncSeconds * time.Second) // LES syncs headers, so that we are up do date when it is done
|
||||
}
|
||||
|
||||
|
@ -216,7 +215,7 @@ func PrepareTestNode() (err error) {
|
|||
func RemoveTestNode() {
|
||||
err := os.RemoveAll(TestDataDir)
|
||||
if err != nil {
|
||||
glog.V(logger.Warn).Infof("could not clean up temporary datadir")
|
||||
log.Warn("could not clean up temporary datadir")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -288,7 +287,7 @@ func ImportTestAccount(keystoreDir, accountFile string) error {
|
|||
if _, err := os.Stat(dst); os.IsNotExist(err) {
|
||||
err = ioutil.WriteFile(dst, static.MustAsset("keys/"+accountFile), 0644)
|
||||
if err != nil {
|
||||
glog.V(logger.Warn).Infof("cannot copy test account PK: %v", err)
|
||||
log.Warn("cannot copy test account PK", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -30,8 +30,8 @@ func TestWhisperFilterRace(t *testing.T) {
|
|||
}
|
||||
accountKey1Hex := common.ToHex(crypto.FromECDSAPub(&accountKey1.PrivateKey.PublicKey))
|
||||
|
||||
whisperService.AddIdentity(accountKey1.PrivateKey)
|
||||
if ok, err := whisperAPI.HasIdentity(accountKey1Hex); err != nil || !ok {
|
||||
whisperService.AddKeyPair(accountKey1.PrivateKey)
|
||||
if ok, err := whisperAPI.HasKeyPair(accountKey1Hex); err != nil || !ok {
|
||||
t.Fatalf("identity not injected: %v", accountKey1Hex)
|
||||
}
|
||||
|
||||
|
@ -42,8 +42,8 @@ func TestWhisperFilterRace(t *testing.T) {
|
|||
}
|
||||
accountKey2Hex := common.ToHex(crypto.FromECDSAPub(&accountKey2.PrivateKey.PublicKey))
|
||||
|
||||
whisperService.AddIdentity(accountKey2.PrivateKey)
|
||||
if ok, err := whisperAPI.HasIdentity(accountKey2Hex); err != nil || !ok {
|
||||
whisperService.AddKeyPair(accountKey2.PrivateKey)
|
||||
if ok, err := whisperAPI.HasKeyPair(accountKey2Hex); err != nil || !ok {
|
||||
t.Fatalf("identity not injected: %v", accountKey2Hex)
|
||||
}
|
||||
|
||||
|
@ -65,10 +65,10 @@ func TestWhisperFilterRace(t *testing.T) {
|
|||
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
whisperAPI.NewFilter(whisper.WhisperFilterArgs{
|
||||
From: accountKey1Hex,
|
||||
To: accountKey2Hex,
|
||||
Topics: []whisper.TopicType{
|
||||
whisperAPI.Subscribe(whisper.WhisperFilterArgs{
|
||||
Sig: accountKey1Hex,
|
||||
Key: accountKey2Hex,
|
||||
Topics: [][]byte{
|
||||
{0x4e, 0x03, 0x65, 0x7a}, {0x34, 0x60, 0x7c, 0x9b}, {0x21, 0x41, 0x7d, 0xf9},
|
||||
},
|
||||
})
|
||||
|
|
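For code being ported against the bumped Whisper (shh/5) API: identities became key pairs and filters became subscriptions. A sketch of the renamed calls used above, in the same shape as the test; the *whisper.Whisper / *whisper.PublicWhisperAPI type names and the whisperv5 import path are assumptions, and the key material is illustrative:

    // illustrative sketch, not part of this change; whisperService and whisperAPI are
    // assumed to be the already-running Whisper service and its public API, as in the test
    package sketch

    import (
        "crypto/ecdsa"

        whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
    )

    func subscribeExample(whisperService *whisper.Whisper, whisperAPI *whisper.PublicWhisperAPI, key *ecdsa.PrivateKey, sigHex, peerKeyHex string) {
        // AddIdentity/HasIdentity became AddKeyPair/HasKeyPair
        whisperService.AddKeyPair(key)
        if ok, err := whisperAPI.HasKeyPair(sigHex); err != nil || !ok {
            panic("identity not injected")
        }

        // NewFilter became Subscribe; From/To became Sig/Key, and topics are raw 4-byte slices
        whisperAPI.Subscribe(whisper.WhisperFilterArgs{
            Sig:    sigHex,
            Key:    peerKeyHex,
            Topics: [][]byte{{0x4e, 0x03, 0x65, 0x7a}},
        })
    }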
|
@ -0,0 +1,27 @@
|
|||
{
|
||||
"name": "status-go",
|
||||
"version": "0.9.6",
|
||||
"description": "JavaScript tests for RPC API (Whisper/5, Swarm)",
|
||||
"main": "index.js",
|
||||
"dependencies": {
|
||||
},
|
||||
"devDependencies": {
|
||||
"chai": "^3.5.0",
|
||||
"mocha": "^3.3.0",
|
||||
"requirejs": "^2.3.3",
|
||||
"web3": "github:farazdagi/web3.js#geth/1.6.1-unstable"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "mocha --bail --slow 1000 --full-trace static/tests"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/farazdagi/status-go.git"
|
||||
},
|
||||
"author": "Victor Farazdagi",
|
||||
"license": "ISC",
|
||||
"bugs": {
|
||||
"url": "https://github.com/farazdagi/status-go/issues"
|
||||
},
|
||||
"homepage": "https://github.com/farazdagi/status-go#readme"
|
||||
}
|
File diff suppressed because one or more lines are too long
|
@ -1,87 +0,0 @@
|
|||
|
||||
<html lang="en">
|
||||
<head>
|
||||
<title>web3.js sample</title>
|
||||
<link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">
|
||||
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css">
|
||||
<script
|
||||
src="https://code.jquery.com/jquery-3.1.1.js"
|
||||
integrity="sha256-16cdPddA6VdVInumRGo6IbivbERE8p7CQR3HzTBuELA="
|
||||
crossorigin="anonymous"></script>
|
||||
<script type="text/javascript" src="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>
|
||||
<script type="text/javascript" src="./bignumber.js"></script>
|
||||
<script type="text/javascript" src="./web3.js"></script>
|
||||
|
||||
<script type="text/javascript">
|
||||
var web3 = new Web3();
|
||||
var shh = web3.shh;
|
||||
web3.setProvider(new web3.providers.HttpProvider('http://localhost:8645'));
|
||||
|
||||
var identity = '0x04eedbaafd6adf4a9233a13e7b1c3c14461fffeba2e9054b8d456ce5f6ebeafadcbf3dce3716253fbc391277fa5a086b60b283daf61fb5b1f26895f456c2f31ae3';
|
||||
if (!web3.shh.hasIdentity(identity)) {
|
||||
throw 'idenitity "0x04eedbaafd6adf4a9233a13e7b1c3c14461fffeba2e9054b8d456ce5f6ebeafadcbf3dce3716253fbc391277fa5a086b60b283daf61fb5b1f26895f456c2f31ae3" not found in whisper';
|
||||
}
|
||||
|
||||
var topic = 'example3';
|
||||
var payload = 'test message 3 (K1 -> "", signed broadcast)';
|
||||
|
||||
// generate symmetric key (if doesn't already exist)
|
||||
if (!shh.hasSymKey(topic)) {
|
||||
shh.addSymKey(topic, "0xdeadbeef"); // alternatively: shh.generateSymKey("example3");
|
||||
// to delete key, rely on: shh.deleteSymKey(topic);
|
||||
}
|
||||
|
||||
// start watching for messages
|
||||
var filter = shh.filter({
|
||||
from: identity,
|
||||
topics: [web3.fromAscii(topic)],
|
||||
keyname: topic // you can use some other name for key too
|
||||
});
|
||||
filter.watch(function(error, result){
|
||||
if (!error) {
|
||||
console.log("Message received1: ", result);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
setTimeout(function () {
|
||||
var message = {
|
||||
from: identity,
|
||||
topics: [web3.fromAscii(topic)],
|
||||
payload: payload,
|
||||
ttl: 20,
|
||||
keyname: topic
|
||||
};
|
||||
var err = shh.post(message)
|
||||
if (err !== null) {
|
||||
console.log("message NOT sent")
|
||||
} else {
|
||||
console.log("message sent OK")
|
||||
}
|
||||
}, 3000)
|
||||
|
||||
|
||||
$(document).ready(function () {
|
||||
});
|
||||
|
||||
</script>
|
||||
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<!-- Static navbar -->
|
||||
<nav class="navbar navbar-default navbar-static-top">
|
||||
<div class="container">
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<div class="container">
|
||||
<div class="jumbotron">
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
</body>
|
||||
|
||||
</html>
|
|
@ -0,0 +1 @@
|
|||
AAAAHO5M3eI:APA91bFutoRd69Nq8-AsZJbwM8MFyH6vVUtkJLlrKXD0EMZ61vQxHA0FhUvJhOArmq-LBTEclB85WgKNYD-RSYKZ7pXKn8VKvFYBqoto6nL15cNlfpx4wCBJkHERlo7lLZx9-g6iQDks
|
|
@ -0,0 +1 @@
|
|||
77d185965daa460ee7a8cb44f6001bb9884a04ed27a49ba6ea0f81cd4e5ac40b
|
|
@ -0,0 +1 @@
|
|||
asdfasdf
|
|
@ -0,0 +1,2 @@
|
|||
- see `make generate` which outputs web3.js into this directory
|
||||
- in future, some static JavaScript files may be added to this folder
|
File diff suppressed because it is too large: static/scripts/web3.js (16180 lines)
|
@ -0,0 +1,754 @@
|
|||
var chai = require("chai");
|
||||
var expect = chai.expect;
|
||||
var assert = chai.assert;
|
||||
var Web3 = require('web3');
|
||||
|
||||
describe('Whisper Tests', function () {
|
||||
var node1 = new Web3();
|
||||
var node2 = new Web3();
|
||||
var web3 = node1;
|
||||
node1.setProvider(new web3.providers.HttpProvider('http://localhost:8645'));
|
||||
node2.setProvider(new web3.providers.HttpProvider('http://localhost:8745'));
|
||||
|
||||
console.log('Node is expected: statusd --datadir app1 --http --httpport 8645 wnode');
|
||||
console.log('Node is expected: statusd --datadir app2 --http --httpport 8745 wnode');
|
||||
console.log('Node is expected: statusd --datadir wnode1 wnode --notify --injectaccounts=false --identity ./static/keys/wnodekey --firebaseauth ./static/keys/firebaseauthkey');
|
||||
|
||||
// some common vars
|
||||
var topic1 = '0xdeadbeef'; // each topic 4 bytes, as hex
|
||||
var topic2 = '0xbeefdead'; // each topic 4 bytes, as hex
|
||||
var topic3 = '0xbebebebe'; // each topic 4 bytes, as hex
|
||||
var topic4 = '0xdadadada'; // each topic 4 bytes, as hex
|
||||
var identity1 = '0x04eedbaafd6adf4a9233a13e7b1c3c14461fffeba2e9054b8d456ce5f6ebeafadcbf3dce3716253fbc391277fa5a086b60b283daf61fb5b1f26895f456c2f31ae3';
|
||||
var identity2 = '0x0490161b00f2c47542d28c2e8908e77159b1720dccceb6393d7c001850122efc3b1709bcea490fd8f5634ba1a145aa0722d86b9330b0e39a8d493cb981fd459da2';
|
||||
|
||||
// watchFilter makes sure that we halt the filter on first message received
|
||||
var watchFilter = function (filter, done) {
|
||||
var messageReceived = false;
|
||||
filter.watch(function (error, message) {
|
||||
if (messageReceived) return; // avoid double calling
|
||||
messageReceived = true; // no need to watch for the filter any more
|
||||
filter.stopWatching();
|
||||
done(error, message);
|
||||
});
|
||||
};
|
||||
|
||||
// makeTopic generates random topic (4 bytes, in hex)
|
||||
var makeTopic = function () {
|
||||
var min = 1;
|
||||
var max = Math.pow(16, 8);
|
||||
var randInt = Math.floor(Math.random() * (max - min + 1)) + min;
|
||||
return web3.toHex(randInt);
|
||||
};
|
||||
|
||||
context('shh/5 API verification', function () {
|
||||
it('statusd node is running', function () {
|
||||
var web3 = new Web3();
|
||||
var provider = new web3.providers.HttpProvider('http://localhost:8645');
|
||||
var result = provider.send({});
|
||||
assert.equal(typeof result, 'object');
|
||||
});
|
||||
|
||||
it('shh.version()', function () {
|
||||
var version = node1.shh.version();
|
||||
assert.equal(version, '0x5', 'Whisper version does not match');
|
||||
});
|
||||
|
||||
it('shh.info()', function () {
|
||||
var info = node1.shh.info();
|
||||
if (info == "") {
|
||||
throw new Error('no Whisper info provided')
|
||||
}
|
||||
});
|
||||
|
||||
context('symmetric key management', function () {
|
||||
var keyId = ''; // symmetric key ID (to be populated)
|
||||
var keyVal = ''; // symmetric key value (to be populated)
|
||||
|
||||
it('shh.generateSymmetricKey()', function () {
|
||||
keyId = node1.shh.generateSymmetricKey();
|
||||
assert.lengthOf(keyId, 64, 'invalid keyId length');
|
||||
});
|
||||
|
||||
it('shh.getSymmetricKey(keyId)', function () {
|
||||
keyVal = node1.shh.getSymmetricKey(keyId);
|
||||
assert.lengthOf(keyVal, 66, 'invalid key value length'); // 2 bytes for "0x"
|
||||
});
|
||||
|
||||
it('shh.hasSymmetricKey(keyId)', function () {
|
||||
expect(node1.shh.hasSymmetricKey(keyId)).to.equal(true);
|
||||
});
|
||||
|
||||
it('shh.deleteSymmetricKey(keyId)', function () {
|
||||
expect(node1.shh.hasSymmetricKey(keyId)).to.equal(true);
|
||||
node1.shh.deleteSymmetricKey(keyId);
|
||||
expect(node1.shh.hasSymmetricKey(keyId)).to.equal(false);
|
||||
});
|
||||
|
||||
it('shh.addSymmetricKeyDirect(keyVal)', function () {
|
||||
keyIdOriginal = keyId;
|
||||
keyId = node1.shh.addSymmetricKeyDirect(keyVal);
|
||||
assert.notEqual(keyId, keyIdOriginal);
|
||||
assert.lengthOf(keyId, 64, 'invalid keyId length');
|
||||
expect(node1.shh.hasSymmetricKey(keyId)).to.equal(true);
|
||||
});
|
||||
|
||||
it('shh.addSymmetricKeyFromPassword(password)', function () {
|
||||
var password = 'foobar';
|
||||
var keyId = node1.shh.addSymmetricKeyFromPassword(password);
|
||||
var keyVal = node1.shh.getSymmetricKey(keyId);
|
||||
|
||||
assert.lengthOf(keyId, 64, 'invalid keyId length');
|
||||
expect(node1.shh.hasSymmetricKey(keyId)).to.equal(true);
|
||||
assert.equal(keyVal, '0xa582720d74d463589df14c11538189a1c07778c47e86f70bab7b5ba27e2de3cc');
|
||||
});
|
||||
});
|
||||
|
||||
context('asymmetric key management', function () {
|
||||
var keyId = ''; // to be populated
|
||||
var pubKey = ''; // to be populated
|
||||
|
||||
it('shh.newKeyPair()', function () {
|
||||
keyId = node1.shh.newKeyPair();
|
||||
assert.lengthOf(keyId, 64);
|
||||
});
|
||||
|
||||
it('shh.hasKeyPair(id)', function () {
|
||||
expect(node1.shh.hasKeyPair(keyId)).to.equal(true);
|
||||
});
|
||||
|
||||
it('shh.getPublicKey(id)', function () {
|
||||
pubKey = node1.shh.getPublicKey(keyId);
|
||||
assert.lengthOf(pubKey, 132);
|
||||
});
|
||||
|
||||
it('shh.hasKeyPair(pubKey)', function () {
|
||||
expect(node1.shh.hasKeyPair(pubKey)).to.equal(true);
|
||||
});
|
||||
|
||||
it('shh.getPrivateKey(id)', function () {
|
||||
var prvkey = node1.shh.getPrivateKey(keyId);
|
||||
assert.lengthOf(prvkey, 66);
|
||||
});
|
||||
|
||||
it('shh.deleteKeyPair(id)', function () {
|
||||
expect(node1.shh.hasKeyPair(pubKey)).to.equal(true);
|
||||
expect(node1.shh.hasKeyPair(keyId)).to.equal(true);
|
||||
node1.shh.deleteKeyPair(keyId);
|
||||
expect(node1.shh.hasKeyPair(pubKey)).to.equal(false);
|
||||
expect(node1.shh.hasKeyPair(keyId)).to.equal(false);
|
||||
|
||||
// re-create
|
||||
keyId = node1.shh.newKeyPair();
|
||||
assert.lengthOf(keyId, 64);
|
||||
pubKey = node1.shh.getPublicKey(keyId);
|
||||
assert.lengthOf(pubKey, 132);
|
||||
});
|
||||
|
||||
it('shh.deleteKeyPair(pubKey)', function () {
|
||||
expect(node1.shh.hasKeyPair(pubKey)).to.equal(true);
|
||||
expect(node1.shh.hasKeyPair(keyId)).to.equal(true);
|
||||
node1.shh.deleteKeyPair(pubKey);
|
||||
expect(node1.shh.hasKeyPair(pubKey)).to.equal(false);
|
||||
expect(node1.shh.hasKeyPair(keyId)).to.equal(false);
|
||||
|
||||
// re-create
|
||||
keyId = node1.shh.newKeyPair();
|
||||
assert.lengthOf(keyId, 64);
|
||||
pubKey = node1.shh.getPublicKey(keyId);
|
||||
assert.lengthOf(pubKey, 132);
|
||||
});
|
||||
});
|
||||
|
||||
context('subscribe and manually get messages', function () {
|
||||
// NOTE: you can still use shh.filter to poll for messages automatically, see other examples
|
||||
|
||||
var filterid1 = ''; // sym filter, to be populated
|
||||
var filterid2 = ''; // asym filter, to be populated
|
||||
var keyId = ''; // symkey, to be populated
|
||||
var uniqueTopic = makeTopic();
|
||||
|
||||
var payloadBeforeSymFilter = 'sent before filter was active (symmetric)';
|
||||
var payloadAfterSymFilter = 'sent after filter was active (symmetric)';
|
||||
var payloadBeforeAsymFilter = 'sent before filter was active (asymmetric)';
|
||||
var payloadAfterAsymFilter = 'sent after filter was active (asymmetric)';
|
||||
|
||||
it('shh.subscribe(filterParams) - symmetric filter', function () {
|
||||
keyId = node1.shh.generateSymmetricKey();
|
||||
assert.lengthOf(keyId, 64);
|
||||
|
||||
// send message, which will be floating around *before* filter is even created
|
||||
var message = {
|
||||
type: "sym",
|
||||
key: keyId,
|
||||
topic: uniqueTopic,
|
||||
payload: payloadBeforeSymFilter
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
|
||||
// symmetric filter
|
||||
filterid1 = node1.shh.subscribe({
|
||||
type: "sym",
|
||||
key: keyId,
|
||||
sig: identity1,
|
||||
topics: [topic1, topic2, uniqueTopic]
|
||||
});
|
||||
assert.lengthOf(filterid1, 64);
|
||||
});
|
||||
|
||||
it('shh.subscribe(filterParams) - asymmetric filter', function () {
|
||||
// send message, which will be floating around *before* filter is even created
|
||||
var message = {
|
||||
type: "asym",
|
||||
key: identity2,
|
||||
topic: uniqueTopic,
|
||||
payload: payloadBeforeAsymFilter
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
|
||||
// asymmetric filter
|
||||
filterid2 = node1.shh.subscribe({
|
||||
type: "asym",
|
||||
key: identity2,
|
||||
sig: identity1,
|
||||
topics: [topic1, topic2, uniqueTopic]
|
||||
});
|
||||
assert.lengthOf(filterid2, 64);
|
||||
});
|
||||
|
||||
it('shh.getMessages(filterID) - symmetric filter', function () {
|
||||
// let's try to capture message that was there *before* filter is created
|
||||
var messages = node1.shh.getMessages(filterid1);
|
||||
assert.typeOf(messages, 'array');
|
||||
assert.lengthOf(messages, 1);
|
||||
assert.equal(web3.toAscii(messages[0].payload), payloadBeforeSymFilter);
|
||||
|
||||
// send message, after the filter has been already installed
|
||||
var message = {
|
||||
type: "sym",
|
||||
key: keyId,
|
||||
topic: uniqueTopic,
|
||||
payload: payloadAfterSymFilter
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
|
||||
it('shh.getMessages(filterID) - asymmetric filter', function () {
|
||||
// let's try to capture message that was there *before* filter is created
|
||||
var messages = node1.shh.getMessages(filterid2);
|
||||
assert.typeOf(messages, 'array');
|
||||
assert.lengthOf(messages, 1);
|
||||
assert.equal(web3.toAscii(messages[0].payload), payloadBeforeAsymFilter);
|
||||
|
||||
// send message, after the filter has been already installed
|
||||
var message = {
|
||||
type: "asym",
|
||||
key: identity2,
|
||||
topic: uniqueTopic,
|
||||
payload: payloadAfterAsymFilter
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
|
||||
it('shh.getSubscriptionMessages(filterID) - symmetric filter', function (done) {
|
||||
// allow some time for message to propagate
|
||||
setTimeout(function () {
|
||||
// now let's try to capture new messages from our last capture
|
||||
var messages = node1.shh.getSubscriptionMessages(filterid1);
|
||||
assert.typeOf(messages, 'array');
|
||||
assert.lengthOf(messages, 1);
|
||||
assert.equal(web3.toAscii(messages[0].payload), payloadAfterSymFilter);
|
||||
|
||||
// no more messages should be returned
|
||||
messages = node1.shh.getSubscriptionMessages(filterid1);
|
||||
assert.typeOf(messages, 'array');
|
||||
assert.lengthOf(messages, 0);
|
||||
|
||||
done();
|
||||
}, 200);
|
||||
});
|
||||
|
||||
it('shh.getSubscriptionMessages(filterID) - asymmetric filter', function (done) {
|
||||
// allow some time for message to propagate
|
||||
setTimeout(function () {
|
||||
// now let's try to capture new messages from our last capture
|
||||
var messages = node1.shh.getSubscriptionMessages(filterid2);
|
||||
assert.typeOf(messages, 'array');
|
||||
assert.lengthOf(messages, 1);
|
||||
assert.equal(web3.toAscii(messages[0].payload), payloadAfterAsymFilter);
|
||||
|
||||
// no more messages should be returned
|
||||
messages = node1.shh.getSubscriptionMessages(filterid2);
|
||||
assert.typeOf(messages, 'array');
|
||||
assert.lengthOf(messages, 0);
|
||||
|
||||
done();
|
||||
}, 200);
|
||||
});
|
||||
|
||||
it('shh.unsubscribe(filterID)', function () {
|
||||
node1.shh.unsubscribe(filterid1);
|
||||
node1.shh.unsubscribe(filterid2);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
context('symmetrically encrypted messages send/receive', function () {
|
||||
this.timeout(0);
|
||||
|
||||
var keyId = ''; // symmetric key ID (to be populated)
|
||||
var keyVal = ''; // symmetric key value (to be populated)
|
||||
var payload = 'here come the dragons';
|
||||
|
||||
it('default test identity is present', function () {
|
||||
if (!node1.shh.hasKeyPair(identity1)) {
|
||||
throw new Error('identity not found in whisper: ' + identity1);
|
||||
}
|
||||
});
|
||||
|
||||
it('ensure symkey exists', function () {
|
||||
keyId = node1.shh.generateSymmetricKey();
|
||||
assert.lengthOf(keyId, 64);
|
||||
expect(node1.shh.hasSymmetricKey(keyId)).to.equal(true);
|
||||
});
|
||||
|
||||
it('read the generated symkey', function () {
|
||||
keyVal = node1.shh.getSymmetricKey(keyId);
|
||||
assert.lengthOf(keyVal, 66); // 2 bytes for "0x"
|
||||
});
|
||||
|
||||
it('send/receive symmetrically encrypted message', function (done) {
|
||||
// start watching for messages
|
||||
watchFilter(node1.shh.filter({
|
||||
type: "sym",
|
||||
key: keyId,
|
||||
sig: identity1,
|
||||
topics: [topic1, topic2]
|
||||
}), function (err, message) {
|
||||
done(err);
|
||||
});
|
||||
|
||||
// send message
|
||||
var message = {
|
||||
type: "sym",
|
||||
key: keyId,
|
||||
sig: identity1,
|
||||
topic: topic1,
|
||||
payload: web3.fromAscii(payload),
|
||||
ttl: 20,
|
||||
powTime: 2,
|
||||
powTarget: 0.001
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
|
||||
it('send the minimal symmetric message possible', function (done) {
|
||||
var uniqueTopic = makeTopic();
|
||||
|
||||
// start watching for messages
|
||||
watchFilter(node1.shh.filter({
|
||||
type: "sym",
|
||||
key: keyId,
|
||||
topics: [uniqueTopic]
|
||||
}), function (err, message) {
|
||||
done(err);
|
||||
});
|
||||
|
||||
// send message
|
||||
var message = {
|
||||
type: "sym",
|
||||
key: keyId,
|
||||
topic: uniqueTopic
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
});
|
||||
|
||||
context('message travelling from one node to another', function () {
|
||||
this.timeout(0);
|
||||
|
||||
var keyId1 = ''; // symmetric key ID on node 1 (to be populated)
|
||||
var keyId2 = ''; // symmetric key ID on node 2 (to be populated)
|
||||
|
||||
it('statusd node1 is running', function () {
|
||||
var web3 = new Web3();
|
||||
var provider = new web3.providers.HttpProvider('http://localhost:8645');
|
||||
var result = provider.send({});
|
||||
assert.equal(typeof result, 'object');
|
||||
});
|
||||
|
||||
it('statusd node2 is running', function () {
|
||||
var web3 = new Web3();
|
||||
var provider = new web3.providers.HttpProvider('http://localhost:8745');
|
||||
var result = provider.send({});
|
||||
assert.equal(typeof result, 'object');
|
||||
});
|
||||
|
||||
it('test identities injected', function () {
|
||||
if (!node1.shh.hasKeyPair(identity1)) {
|
||||
throw new Error('identity not found in whisper (node1): ' + identity1);
|
||||
}
|
||||
if (!node1.shh.hasKeyPair(identity2)) {
|
||||
throw new Error('identity not found in whisper (node1): ' + identity2);
|
||||
}
|
||||
if (!node2.shh.hasKeyPair(identity1)) {
|
||||
throw new Error('identity not found in whisper (node2): ' + identity1);
|
||||
}
|
||||
if (!node2.shh.hasKeyPair(identity2)) {
|
||||
throw new Error('identity not found in whisper (node2): ' + identity2);
|
||||
}
|
||||
});
|
||||
|
||||
it('ensure symkey exists', function () {
|
||||
keyId1 = node1.shh.generateSymmetricKey();
|
||||
assert.lengthOf(keyId1, 64);
|
||||
expect(node1.shh.hasSymmetricKey(keyId1)).to.equal(true);
|
||||
|
||||
// obtain key value
|
||||
var keyVal = node1.shh.getSymmetricKey(keyId1);
|
||||
assert.lengthOf(keyVal, 66); // 2 bytes of "0x"
|
||||
|
||||
// share the value with the node2
|
||||
keyId2 = node2.shh.addSymmetricKeyDirect(keyVal);
|
||||
assert.lengthOf(keyId2, 64);
|
||||
expect(node2.shh.hasSymmetricKey(keyId2)).to.equal(true);
|
||||
});
|
||||
|
||||
it('send symmetrically encrypted, signed message (node1 -> node2)', function (done) {
|
||||
var payload = 'send symmetrically encrypted, signed message (node1 -> node2)';
|
||||
var topic = makeTopic();
|
||||
// start watching for messages
|
||||
watchFilter(node2.shh.filter({
|
||||
type: "sym",
|
||||
sig: identity1,
|
||||
key: keyId2,
|
||||
topics: [topic]
|
||||
}), function (err, message) {
|
||||
done(err);
|
||||
});
|
||||
|
||||
// send message
|
||||
var message = {
|
||||
type: "sym",
|
||||
sig: identity1,
|
||||
key: keyId1,
|
||||
topic: topic,
|
||||
payload: payload,
|
||||
ttl: 20
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
|
||||
it('send asymmetrically encrypted, signed message (node1.id1 -> node2.id2)', function (done) {
|
||||
var payload = 'send asymmetrically encrypted, signed message (node1.id1 -> node2.id2)';
|
||||
var topic = makeTopic();
|
||||
// start watching for messages
|
||||
watchFilter(node2.shh.filter({
|
||||
type: "asym",
|
||||
sig: identity1,
|
||||
key: identity2
|
||||
}), function (err, message) {
|
||||
done(err);
|
||||
});
|
||||
|
||||
// send message
|
||||
var message = {
|
||||
type: "asym",
|
||||
sig: identity1,
|
||||
key: identity2,
|
||||
topic: topic,
|
||||
payload: payload,
|
||||
ttl: 20
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
});
|
||||
|
||||
context('push notifications', function () {
|
||||
this.timeout(5000);
|
||||
var discoveryPubKey = '0x040edb0d71a3dbe928e154fcb696ffbda359b153a90efc2b46f0043ce9f5dbe55b77b9328fd841a1db5273758624afadd5b39638d4c35b36b3a96e1a586c1b4c2a';
|
||||
var discoverServerTopic = '0x268302f3'; // DISCOVER_NOTIFICATION_SERVER
|
||||
var proposeServerTopic = '0x08e3d8c0'; // PROPOSE_NOTIFICATION_SERVER
|
||||
var acceptServerTopic = '0x04f7dea6'; // ACCEPT_NOTIFICATION_SERVER
|
||||
var ackClientSubscriptionTopic = '0x93dafe28'; // ACK_NOTIFICATION_SERVER_SUBSCRIPTION
|
||||
var sendNotificationTopic = '0x69915296'; // SEND_NOTIFICATION
|
||||
var newChatSessionTopic = '0x509579a2'; // NEW_CHAT_SESSION
|
||||
var ackNewChatSessionTopic = '0xd012aae8'; // ACK_NEW_CHAT_SESSION
|
||||
var newDeviceRegistrationTopic = '0x14621a51'; // NEW_DEVICE_REGISTRATION
|
||||
var ackDeviceRegistrationTopic = '0x424358d6'; // ACK_DEVICE_REGISTRATION
|
||||
var checkClientSessionTopic = '0x8745d931'; // CHECK_CLIENT_SESSION
|
||||
var confirmClientSessionTopic = '0xd3202c5f'; // CONFIRM_CLIENT_SESSION
|
||||
var dropClientSessionTopic = '0x3a6656bb'; // DROP_CLIENT_SESSION
|
||||
|
||||
// ensures that message had payload (which is HEX-encoded JSON)
|
||||
var extractPayload = function (message) {
|
||||
expect(message).to.have.property('payload');
|
||||
return JSON.parse(web3.toAscii(message.payload));
|
||||
};
|
||||
|
||||
var identity1 = ''; // pub key of device 1
|
||||
var identity2 = ''; // pub key of device 2
|
||||
var chatKeySharingTopic = makeTopic(); // topic used by device1 to send chat key to device 2
|
||||
|
||||
context('prepare devices', function () {
|
||||
it('create key pair to be used as main identity on device1', function () {
|
||||
var keyId = node1.shh.newKeyPair();
|
||||
assert.lengthOf(keyId, 64);
|
||||
|
||||
identity1 = node1.shh.getPublicKey(keyId);
|
||||
assert.lengthOf(identity1, 132);
|
||||
|
||||
expect(node1.shh.hasKeyPair(identity1)).to.equal(true);
|
||||
expect(node1.shh.hasKeyPair(identity2)).to.equal(false);
|
||||
});
|
||||
|
||||
it('create key pair to be used as main identity on device2', function () {
|
||||
var keyId = node2.shh.newKeyPair();
|
||||
assert.lengthOf(keyId, 64);
|
||||
|
||||
identity2 = node2.shh.getPublicKey(keyId);
|
||||
assert.lengthOf(identity2, 132);
|
||||
|
||||
expect(node2.shh.hasKeyPair(identity1)).to.equal(false);
|
||||
expect(node2.shh.hasKeyPair(identity2)).to.equal(true);
|
||||
});
|
||||
});
|
||||
|
||||
context('run device1', function () {
|
||||
var serverId = ''; // accepted/selected server id
|
||||
var subscriptionKeyId = ''; // symkey provided by server, and used to configure client-server subscription
|
||||
var chatKeyId = ''; // symkey provided by server, and shared among clients so that they can trigger notifications
|
||||
var appChatId = ''; // chat id that identifies device1-device2 interaction session on RN app level
|
||||
|
||||
|
||||
it('start discovery by sending discovery request', function () {
|
||||
var message = {
|
||||
type: "asym",
|
||||
sig: identity1,
|
||||
key: discoveryPubKey,
|
||||
topic: discoverServerTopic,
|
||||
ttl: 20
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
|
||||
it('watch for server proposals', function (done) {
|
||||
watchFilter(node1.shh.filter({
|
||||
type: "asym",
|
||||
sig: discoveryPubKey,
|
||||
key: identity1,
|
||||
topics: [proposeServerTopic]
|
||||
}), function (err, message) {
|
||||
if (err) return done(err);
|
||||
|
||||
// process payload
|
||||
var payload = extractPayload(message);
|
||||
expect(payload).to.have.property('server');
|
||||
serverId = payload.server;
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('client accepts server', function () {
|
||||
var message = {
|
||||
type: "asym",
|
||||
sig: identity1,
|
||||
key: discoveryPubKey,
|
||||
topic: acceptServerTopic,
|
||||
payload: '{"server": "' + serverId + '"}',
|
||||
ttl: 20
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
|
||||
it('watch for server ACK response and save provided subscription key', function (done) {
|
||||
watchFilter(node1.shh.filter({
|
||||
type: "asym",
|
||||
key: identity1,
|
||||
topics: [ackClientSubscriptionTopic]
|
||||
}), function (err, message) {
|
||||
if (err) return done(err);
|
||||
|
||||
// process payload
|
||||
var payload = extractPayload(message);
|
||||
expect(payload).to.have.property('server');
|
||||
expect(payload).to.have.property('key');
|
||||
|
||||
// save subscription key
|
||||
subscriptionKeyId = node1.shh.addSymmetricKeyDirect(payload.key);
|
||||
assert.lengthOf(subscriptionKeyId, 64);
|
||||
expect(node1.shh.hasSymmetricKey(subscriptionKeyId)).to.equal(true);
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('create chat session', function () {
|
||||
appChatId = makeTopic(); // globally unique chat id
|
||||
var message = {
|
||||
type: "sym",
|
||||
sig: identity1,
|
||||
key: subscriptionKeyId,
|
||||
topic: newChatSessionTopic,
|
||||
payload: '{"chat": "' + appChatId + '"}',
|
||||
ttl: 20
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
|
||||
it('watch for server to respond with chat key', function (done) {
|
||||
watchFilter(node1.shh.filter({
|
||||
type: "asym",
|
||||
key: identity1,
|
||||
topics: [ackNewChatSessionTopic]
|
||||
}), function (err, message) {
|
||||
if (err) return done(err);
|
||||
|
||||
// process payload
|
||||
var payload = extractPayload(message);
|
||||
expect(payload).to.have.property('server');
|
||||
expect(payload).to.have.property('key');
|
||||
|
||||
// save subscription key
|
||||
chatKeyId = node1.shh.addSymmetricKeyDirect(payload.key);
|
||||
assert.lengthOf(chatKeyId, 64);
|
||||
expect(node1.shh.hasSymmetricKey(chatKeyId)).to.equal(true);
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('register device with a given chat', function (done) {
|
||||
// this is obtained from https://status-sandbox-c1b34.firebaseapp.com/
|
||||
var deviceId = 'ca5pRJc6L8s:APA91bHpYFtpxvXx6uOayGmnNVnktA4PEEZdquCCt3fWR5ldLzSy1A37Tsbzk5Gavlmk1d_fvHRVnK7xPAhFFl-erF7O87DnIEstW6DEyhyiKZYA4dXFh6uy323f9A3uw5hEtT_kQVhT';
|
||||
var message = {
|
||||
type: "sym",
|
||||
sig: identity1,
|
||||
key: chatKeyId,
|
||||
topic: newDeviceRegistrationTopic,
|
||||
payload: '{"device": "' + deviceId + '"}',
|
||||
ttl: 20
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
|
||||
// watch for server ACK
|
||||
watchFilter(node1.shh.filter({
|
||||
type: "asym",
|
||||
key: identity1,
|
||||
topics: [ackDeviceRegistrationTopic]
|
||||
}), function (err, message) {
|
||||
if (err) return done(err);
|
||||
|
||||
// process payload
|
||||
var payload = extractPayload(message);
|
||||
expect(payload).to.have.property('server');
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('share chat key, so that another device can send us notifications', function () {
|
||||
var chatKey = node1.shh.getSymmetricKey(chatKeyId);
|
||||
assert.lengthOf(chatKey, 66);
|
||||
var message = {
|
||||
type: "asym",
|
||||
sig: identity1,
|
||||
key: identity2,
|
||||
topic: chatKeySharingTopic,
|
||||
payload: '{"chat": "' + appChatId + '", "key": "' + chatKey + '"}',
|
||||
ttl: 20
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
});
|
||||
|
||||
context('run device2', function () {
|
||||
var chatKeyId = '';
|
||||
|
||||
it('watch for device1 to send us chat key', function (done) {
|
||||
watchFilter(node2.shh.filter({
|
||||
type: "asym",
|
||||
key: identity2,
|
||||
topics: [chatKeySharingTopic]
|
||||
}), function (err, message) {
|
||||
if (err) return done(err);
|
||||
|
||||
// process payload
|
||||
var payload = extractPayload(message);
|
||||
expect(payload).to.have.property('chat');
|
||||
expect(payload).to.have.property('key');
|
||||
|
||||
// persist chat key
|
||||
chatKeyId = node2.shh.addSymmetricKeyDirect(payload.key);
|
||||
assert.lengthOf(chatKeyId, 64);
|
||||
expect(node2.shh.hasSymmetricKey(chatKeyId)).to.equal(true);
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('trigger notification (from device2, on device1)', function () {
|
||||
var message = {
|
||||
type: "sym",
|
||||
sig: identity2,
|
||||
key: chatKeyId,
|
||||
topic: sendNotificationTopic,
|
||||
payload: '{' // see https://firebase.google.com/docs/cloud-messaging/http-server-ref
|
||||
+ '"notification": {'
|
||||
+ '"title": "status.im notification",'
|
||||
+ '"body": "Hello this is test notification!",'
|
||||
+ '"icon": "https://status.im/img/logo.png",'
|
||||
+ '"click_action": "https://status.im"'
|
||||
+ '},'
|
||||
+ '"to": "{{ ID }}"' // this get replaced by device id your've registered
|
||||
+ '}',
|
||||
ttl: 20
|
||||
};
|
||||
expect(node2.shh.post(message)).to.equal(null);
|
||||
});
|
||||
});
|
||||
|
||||
context('misc methods and cleanup', function () {
|
||||
|
||||
it('check client session', function (done) {
|
||||
// request status
|
||||
var message = {
|
||||
type: "asym",
|
||||
sig: identity1,
|
||||
key: discoveryPubKey,
|
||||
topic: checkClientSessionTopic,
|
||||
ttl: 20
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
|
||||
// process server's response
|
||||
watchFilter(node1.shh.filter({
|
||||
type: "asym",
|
||||
key: identity1,
|
||||
topics: [confirmClientSessionTopic]
|
||||
}), function (err, message) {
|
||||
if (err) return done(err);
|
||||
|
||||
// process payload
|
||||
var payload = extractPayload(message);
|
||||
expect(payload).to.have.property('server');
|
||||
expect(payload).to.have.property('key');
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('remove client session', function () {
|
||||
var message = {
|
||||
type: "asym",
|
||||
sig: identity1,
|
||||
key: discoveryPubKey,
|
||||
topic: dropClientSessionTopic,
|
||||
ttl: 20
|
||||
};
|
||||
expect(node1.shh.post(message)).to.equal(null);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
|
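The push-notification handshake exercised above rides on a fixed set of 4-byte Whisper topics. For reference, the same values as Go constants (hex strings copied verbatim from the test; the server-side definitions are not part of this diff):

    // illustrative reference, not part of this change; values copied from the test above
    package sketch

    const (
        topicDiscoverNotificationServer = "0x268302f3" // DISCOVER_NOTIFICATION_SERVER: client looks for a server
        topicProposeNotificationServer  = "0x08e3d8c0" // PROPOSE_NOTIFICATION_SERVER: server proposes itself
        topicAcceptNotificationServer   = "0x04f7dea6" // ACCEPT_NOTIFICATION_SERVER: client accepts a proposal
        topicAckServerSubscription      = "0x93dafe28" // ACK_NOTIFICATION_SERVER_SUBSCRIPTION: server returns the subscription sym-key
        topicNewChatSession             = "0x509579a2" // NEW_CHAT_SESSION: client opens a chat session
        topicAckNewChatSession          = "0xd012aae8" // ACK_NEW_CHAT_SESSION: server returns the chat sym-key
        topicNewDeviceRegistration      = "0x14621a51" // NEW_DEVICE_REGISTRATION: client registers its FCM device id
        topicAckDeviceRegistration      = "0x424358d6" // ACK_DEVICE_REGISTRATION
        topicSendNotification           = "0x69915296" // SEND_NOTIFICATION: a peer holding the chat key triggers a push
        topicCheckClientSession         = "0x8745d931" // CHECK_CLIENT_SESSION
        topicConfirmClientSession       = "0xd3202c5f" // CONFIRM_CLIENT_SESSION
        topicDropClientSession          = "0x3a6656bb" // DROP_CLIENT_SESSION
    )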
@ -0,0 +1,25 @@
|
|||
Copyright (c) 2011, Evan Shaw <edsrzf@gmail.com>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
* Neither the name of the copyright holder nor the
|
||||
names of its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
|
||||
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
mmap-go
|
||||
=======
|
||||
|
||||
mmap-go is a portable mmap package for the [Go programming language](http://golang.org).
|
||||
It has been tested on Linux (386, amd64), OS X, and Windows (386). It should also
|
||||
work on other Unix-like platforms, but hasn't been tested with them. I'm interested
|
||||
to hear about the results.
|
||||
|
||||
I haven't been able to add more features without adding significant complexity,
|
||||
so mmap-go doesn't support mprotect, mincore, and maybe a few other things.
|
||||
If you're running on a Unix-like platform and need some of these features,
|
||||
I suggest Gustavo Niemeyer's [gommap](http://labix.org/gommap).
|
|
@ -0,0 +1,116 @@
|
|||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file defines the common package interface and contains a little bit of
|
||||
// factored out logic.
|
||||
|
||||
// Package mmap allows mapping files into memory. It tries to provide a simple, reasonably portable interface,
|
||||
// but doesn't go out of its way to abstract away every little platform detail.
|
||||
// This specifically means:
|
||||
// * forked processes may or may not inherit mappings
|
||||
// * a file's timestamp may or may not be updated by writes through mappings
|
||||
// * specifying a size larger than the file's actual size can increase the file's size
|
||||
// * If the mapped file is being modified by another process while your program's running, don't expect consistent results between platforms
|
||||
package mmap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// RDONLY maps the memory read-only.
|
||||
// Attempts to write to the MMap object will result in undefined behavior.
|
||||
RDONLY = 0
|
||||
// RDWR maps the memory as read-write. Writes to the MMap object will update the
|
||||
// underlying file.
|
||||
RDWR = 1 << iota
|
||||
// COPY maps the memory as copy-on-write. Writes to the MMap object will affect
|
||||
// memory, but the underlying file will remain unchanged.
|
||||
COPY
|
||||
// If EXEC is set, the mapped memory is marked as executable.
|
||||
EXEC
|
||||
)
|
||||
|
||||
const (
|
||||
// If the ANON flag is set, the mapped memory will not be backed by a file.
|
||||
ANON = 1 << iota
|
||||
)
|
||||
|
||||
// MMap represents a file mapped into memory.
|
||||
type MMap []byte
|
||||
|
||||
// Map maps an entire file into memory.
|
||||
// If ANON is set in flags, f is ignored.
|
||||
func Map(f *os.File, prot, flags int) (MMap, error) {
|
||||
return MapRegion(f, -1, prot, flags, 0)
|
||||
}
|
||||
|
||||
// MapRegion maps part of a file into memory.
|
||||
// The offset parameter must be a multiple of the system's page size.
|
||||
// If length < 0, the entire file will be mapped.
|
||||
// If ANON is set in flags, f is ignored.
|
||||
func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
|
||||
if offset%int64(os.Getpagesize()) != 0 {
|
||||
return nil, errors.New("offset parameter must be a multiple of the system's page size")
|
||||
}
|
||||
|
||||
var fd uintptr
|
||||
if flags&ANON == 0 {
|
||||
fd = uintptr(f.Fd())
|
||||
if length < 0 {
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
length = int(fi.Size())
|
||||
}
|
||||
} else {
|
||||
if length <= 0 {
|
||||
return nil, errors.New("anonymous mapping requires non-zero length")
|
||||
}
|
||||
fd = ^uintptr(0)
|
||||
}
|
||||
return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
|
||||
}
|
||||
|
||||
func (m *MMap) header() *reflect.SliceHeader {
|
||||
return (*reflect.SliceHeader)(unsafe.Pointer(m))
|
||||
}
|
||||
|
||||
// Lock keeps the mapped region in physical memory, ensuring that it will not be
|
||||
// swapped out.
|
||||
func (m MMap) Lock() error {
|
||||
dh := m.header()
|
||||
return lock(dh.Data, uintptr(dh.Len))
|
||||
}
|
||||
|
||||
// Unlock reverses the effect of Lock, allowing the mapped region to potentially
|
||||
// be swapped out.
|
||||
// If m is already unlocked, an error will result.
|
||||
func (m MMap) Unlock() error {
|
||||
dh := m.header()
|
||||
return unlock(dh.Data, uintptr(dh.Len))
|
||||
}
|
||||
|
||||
// Flush synchronizes the mapping's contents to the file's contents on disk.
|
||||
func (m MMap) Flush() error {
|
||||
dh := m.header()
|
||||
return flush(dh.Data, uintptr(dh.Len))
|
||||
}
|
||||
|
||||
// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
|
||||
// m to nil.
|
||||
// Trying to read or write any remaining references to m after Unmap is called will
|
||||
// result in undefined behavior.
|
||||
// Unmap should only be called on the slice value that was originally returned from
|
||||
// a call to Map. Calling Unmap on a derived slice may cause errors.
|
||||
func (m *MMap) Unmap() error {
|
||||
dh := m.header()
|
||||
err := unmap(dh.Data, uintptr(dh.Len))
|
||||
*m = nil
|
||||
return err
|
||||
}
|
|
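For orientation, here is a minimal sketch (not part of this change) of how the vendored mmap package above could be used from Go. The file name, import path, and error handling are illustrative assumptions, not code from this commit:

package main

import (
	"log"
	"os"

	mmap "github.com/edsrzf/mmap-go" // assumed upstream import path of the vendored package
)

func main() {
	// Open an existing file for read/write; "example.dat" is a hypothetical path.
	f, err := os.OpenFile("example.dat", os.O_RDWR, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Map the whole file with read/write protection (see the RDWR constant above).
	m, err := mmap.Map(f, mmap.RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}

	// Writes through the byte slice update the underlying file once flushed.
	if len(m) > 0 {
		m[0] = 'X'
	}
	if err := m.Flush(); err != nil {
		log.Fatal(err)
	}

	// Unmap releases the mapping; m must not be used afterwards.
	if err := m.Unmap(); err != nil {
		log.Fatal(err)
	}
}

The Map, Flush, and Unmap calls match the signatures defined in the file above; everything else (paths, logging) is filler to keep the example self-contained.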
@ -0,0 +1,67 @@
|
|||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin dragonfly freebsd linux openbsd solaris netbsd
|
||||
|
||||
package mmap
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
|
||||
flags := syscall.MAP_SHARED
|
||||
prot := syscall.PROT_READ
|
||||
switch {
|
||||
case inprot&COPY != 0:
|
||||
prot |= syscall.PROT_WRITE
|
||||
flags = syscall.MAP_PRIVATE
|
||||
case inprot&RDWR != 0:
|
||||
prot |= syscall.PROT_WRITE
|
||||
}
|
||||
if inprot&EXEC != 0 {
|
||||
prot |= syscall.PROT_EXEC
|
||||
}
|
||||
if inflags&ANON != 0 {
|
||||
flags |= syscall.MAP_ANON
|
||||
}
|
||||
|
||||
b, err := syscall.Mmap(int(fd), off, len, prot, flags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func flush(addr, len uintptr) error {
|
||||
_, _, errno := syscall.Syscall(_SYS_MSYNC, addr, len, _MS_SYNC)
|
||||
if errno != 0 {
|
||||
return syscall.Errno(errno)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func lock(addr, len uintptr) error {
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_MLOCK, addr, len, 0)
|
||||
if errno != 0 {
|
||||
return syscall.Errno(errno)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unlock(addr, len uintptr) error {
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_MUNLOCK, addr, len, 0)
|
||||
if errno != 0 {
|
||||
return syscall.Errno(errno)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmap(addr, len uintptr) error {
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, addr, len, 0)
|
||||
if errno != 0 {
|
||||
return syscall.Errno(errno)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,125 @@
|
|||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mmap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// mmap on Windows is a two-step process.
|
||||
// First, we call CreateFileMapping to get a handle.
|
||||
// Then, we call MapViewOfFile to get an actual pointer into memory.
|
||||
// Because we want to emulate a POSIX-style mmap, we don't want to expose
|
||||
// the handle -- only the pointer. We also want to return only a byte slice,
|
||||
// not a struct, so it's convenient to manipulate.
|
||||
|
||||
// We keep this map so that we can get back the original handle from the memory address.
|
||||
var handleLock sync.Mutex
|
||||
var handleMap = map[uintptr]syscall.Handle{}
|
||||
|
||||
func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) {
|
||||
flProtect := uint32(syscall.PAGE_READONLY)
|
||||
dwDesiredAccess := uint32(syscall.FILE_MAP_READ)
|
||||
switch {
|
||||
case prot&COPY != 0:
|
||||
flProtect = syscall.PAGE_WRITECOPY
|
||||
dwDesiredAccess = syscall.FILE_MAP_COPY
|
||||
case prot&RDWR != 0:
|
||||
flProtect = syscall.PAGE_READWRITE
|
||||
dwDesiredAccess = syscall.FILE_MAP_WRITE
|
||||
}
|
||||
if prot&EXEC != 0 {
|
||||
flProtect <<= 4
|
||||
dwDesiredAccess |= syscall.FILE_MAP_EXECUTE
|
||||
}
|
||||
|
||||
// The maximum size is the area of the file, starting from 0,
|
||||
// that we wish to allow to be mappable. It is the sum of
|
||||
// the length the user requested, plus the offset where that length
|
||||
// is starting from. This does not map the data into memory.
|
||||
maxSizeHigh := uint32((off + int64(len)) >> 32)
|
||||
maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF)
|
||||
// TODO: Do we need to set some security attributes? It might help portability.
|
||||
h, errno := syscall.CreateFileMapping(syscall.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil)
|
||||
if h == 0 {
|
||||
return nil, os.NewSyscallError("CreateFileMapping", errno)
|
||||
}
|
||||
|
||||
// Actually map a view of the data into memory. The view's size
|
||||
// is the length the user requested.
|
||||
fileOffsetHigh := uint32(off >> 32)
|
||||
fileOffsetLow := uint32(off & 0xFFFFFFFF)
|
||||
addr, errno := syscall.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len))
|
||||
if addr == 0 {
|
||||
return nil, os.NewSyscallError("MapViewOfFile", errno)
|
||||
}
|
||||
handleLock.Lock()
|
||||
handleMap[addr] = h
|
||||
handleLock.Unlock()
|
||||
|
||||
m := MMap{}
|
||||
dh := m.header()
|
||||
dh.Data = addr
|
||||
dh.Len = len
|
||||
dh.Cap = dh.Len
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func flush(addr, len uintptr) error {
|
||||
errno := syscall.FlushViewOfFile(addr, len)
|
||||
if errno != nil {
|
||||
return os.NewSyscallError("FlushViewOfFile", errno)
|
||||
}
|
||||
|
||||
handleLock.Lock()
|
||||
defer handleLock.Unlock()
|
||||
handle, ok := handleMap[addr]
|
||||
if !ok {
|
||||
// should be impossible; we would've errored above
|
||||
return errors.New("unknown base address")
|
||||
}
|
||||
|
||||
errno = syscall.FlushFileBuffers(handle)
|
||||
return os.NewSyscallError("FlushFileBuffers", errno)
|
||||
}
|
||||
|
||||
func lock(addr, len uintptr) error {
|
||||
errno := syscall.VirtualLock(addr, len)
|
||||
return os.NewSyscallError("VirtualLock", errno)
|
||||
}
|
||||
|
||||
func unlock(addr, len uintptr) error {
|
||||
errno := syscall.VirtualUnlock(addr, len)
|
||||
return os.NewSyscallError("VirtualUnlock", errno)
|
||||
}
|
||||
|
||||
func unmap(addr, len uintptr) error {
|
||||
flush(addr, len)
|
||||
// Lock the UnmapViewOfFile along with the handleMap deletion.
|
||||
// As soon as we unmap the view, the OS is free to give the
|
||||
// same addr to another new map. We don't want another goroutine
|
||||
// to insert and remove the same addr into handleMap while
|
||||
// we're trying to remove our old addr/handle pair.
|
||||
handleLock.Lock()
|
||||
defer handleLock.Unlock()
|
||||
err := syscall.UnmapViewOfFile(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handle, ok := handleMap[addr]
|
||||
if !ok {
|
||||
// should be impossible; we would've errored above
|
||||
return errors.New("unknown base address")
|
||||
}
|
||||
delete(handleMap, addr)
|
||||
|
||||
e := syscall.CloseHandle(syscall.Handle(handle))
|
||||
return os.NewSyscallError("CloseHandle", e)
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mmap
|
||||
|
||||
const _SYS_MSYNC = 277
|
||||
const _MS_SYNC = 0x04
|
|
@ -0,0 +1,14 @@
|
|||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin dragonfly freebsd linux openbsd solaris
|
||||
|
||||
package mmap
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const _SYS_MSYNC = syscall.SYS_MSYNC
|
||||
const _MS_SYNC = syscall.MS_SYNC
|
|
@ -1,12 +0,0 @@
|
|||
.idea/
|
||||
.DS_Store
|
||||
*/**/*un~
|
||||
.vagrant/
|
||||
*.pyc
|
||||
build/
|
||||
pyethash.egg-info/
|
||||
*.so
|
||||
*~
|
||||
*.swp
|
||||
MANIFEST
|
||||
dist/
|
|
@ -1,23 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.4.2
|
||||
|
||||
before_install:
|
||||
# for g++4.8 and C++11
|
||||
- sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
|
||||
|
||||
# Set up go-ethereum
|
||||
- sudo apt-get update -y -qq
|
||||
- sudo apt-get install -yqq libgmp3-dev
|
||||
- git clone --depth=10 https://github.com/ethereum/go-ethereum ${GOPATH}/src/github.com/ethereum/go-ethereum
|
||||
# use canned dependencies from the go-ethereum repository
|
||||
- export GOPATH=$GOPATH:$GOPATH/src/github.com/ethereum/go-ethereum/Godeps/_workspace/
|
||||
- echo $GOPATH
|
||||
|
||||
install:
|
||||
# need to explicitly request version 1.48 since by default we get 1.46 which does not work with C++11
|
||||
- sudo apt-get install -qq --yes --force-yes g++-4.8
|
||||
- sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 50
|
||||
- sudo apt-get install -qq wget cmake bash libboost-test1.48-dev libboost-system1.48-dev libboost-filesystem1.48-dev nodejs python-pip python-dev valgrind
|
||||
- sudo pip install virtualenv -q
|
||||
script: "./test/test.sh"
|
|
@ -1,14 +0,0 @@
|
|||
cmake_minimum_required(VERSION 2.8.7)
|
||||
project(ethash)
|
||||
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/")
|
||||
set(ETHHASH_LIBS ethash)
|
||||
|
||||
if (WIN32 AND WANT_CRYPTOPP)
|
||||
add_subdirectory(cryptopp)
|
||||
endif()
|
||||
|
||||
add_subdirectory(src/libethash)
|
||||
|
||||
add_subdirectory(src/benchmark EXCLUDE_FROM_ALL)
|
||||
add_subdirectory(test/c)
|
|
@ -1,17 +0,0 @@
|
|||
include setup.py
|
||||
|
||||
# C sources
|
||||
include src/libethash/internal.c
|
||||
include src/libethash/sha3.c
|
||||
include src/libethash/util.c
|
||||
include src/python/core.c
|
||||
|
||||
# Headers
|
||||
include src/libethash/compiler.h
|
||||
include src/libethash/data_sizes.h
|
||||
include src/libethash/endian.h
|
||||
include src/libethash/ethash.h
|
||||
include src/libethash/fnv.h
|
||||
include src/libethash/internal.h
|
||||
include src/libethash/sha3.h
|
||||
include src/libethash/util.h
|
|
@ -1,6 +0,0 @@
|
|||
.PHONY: clean test
|
||||
test:
|
||||
./test/test.sh
|
||||
|
||||
clean:
|
||||
rm -rf *.so pyethash.egg-info/ build/ test/python/python-virtual-env/ test/c/build/ pyethash.so test/python/*.pyc dist/ MANIFEST
|
|
@ -1,22 +0,0 @@
|
|||
[![Build Status](https://travis-ci.org/ethereum/ethash.svg?branch=master)](https://travis-ci.org/ethereum/ethash)
|
||||
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/debris/ethash?branch=master&svg=true)](https://ci.appveyor.com/project/debris/ethash-nr37r/branch/master)
|
||||
|
||||
# Ethash
|
||||
|
||||
For details on this project, please see the Ethereum wiki:
|
||||
https://github.com/ethereum/wiki/wiki/Ethash
|
||||
|
||||
### Coding Style for C++ code:
|
||||
|
||||
Follow the same exact style as in [cpp-ethereum](https://github.com/ethereum/cpp-ethereum/blob/develop/CodingStandards.txt)
|
||||
|
||||
### Coding Style for C code:
|
||||
|
||||
The main thing above all is code consistency.
|
||||
|
||||
- Tabs for indentation. A tab is 4 spaces
|
||||
- Try to stick to the [K&R](http://en.wikipedia.org/wiki/Indent_style#K.26R_style),
|
||||
especially for the C code.
|
||||
- Keep the line lengths reasonable. No hard limit on 80 characters but don't go further
|
||||
than 110. Some people work with multiple buffers next to each other.
|
||||
Make them like you :)
|
|
@ -1,7 +0,0 @@
|
|||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
Vagrant.configure(2) do |config|
|
||||
config.vm.box = "Ubuntu 12.04"
|
||||
config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box"
|
||||
end
|
|
@ -1,43 +0,0 @@
|
|||
version: 1.0.0.{build}
|
||||
|
||||
environment:
|
||||
BOOST_ROOT: "c:/projects/ethash/deps/boost"
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- develop
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
clone_folder: c:\projects\ethash
|
||||
|
||||
#platform: Any CPU
|
||||
#configuration: Debug
|
||||
|
||||
install:
|
||||
# by default, all script lines are interpreted as batch
|
||||
|
||||
# scripts to run before build
|
||||
before_build:
|
||||
- echo "Downloading boost..."
|
||||
- mkdir c:\projects\ethash\deps
|
||||
- cd c:\projects\ethash\deps
|
||||
- curl -O https://build.ethdev.com/builds/windows-precompiled/boost.tar.gz
|
||||
- echo "Unzipping boost..."
|
||||
- 7z x boost.tar.gz > nul
|
||||
- 7z x boost.tar > nul
|
||||
- ls
|
||||
- echo "Running cmake..."
|
||||
- cd c:\projects\ethash
|
||||
- cmake .
|
||||
|
||||
build:
|
||||
project: ALL_BUILD.vcxproj # path to Visual Studio solution or project
|
||||
|
||||
after_build:
|
||||
- echo "Running tests..."
|
||||
- cd c:\projects\ethash\test\c\Debug
|
||||
- Test.exe
|
||||
- echo "Finished!"
|
||||
|
|
@ -1,441 +0,0 @@
|
|||
// Copyright 2015 The go-ethereum Authors
|
||||
// Copyright 2015 Lefteris Karapetsas <lefteris@refu.co>
|
||||
// Copyright 2015 Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethash
|
||||
|
||||
/*
|
||||
#include "src/libethash/internal.h"
|
||||
|
||||
int ethashGoCallback_cgo(unsigned);
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
)
|
||||
|
||||
var (
|
||||
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||
sharedLight = new(Light)
|
||||
)
|
||||
|
||||
const (
|
||||
epochLength uint64 = 30000
|
||||
cacheSizeForTesting C.uint64_t = 1024
|
||||
dagSizeForTesting C.uint64_t = 1024 * 32
|
||||
)
|
||||
|
||||
var DefaultDir = defaultDir()
|
||||
|
||||
func defaultDir() string {
|
||||
home := os.Getenv("HOME")
|
||||
if user, err := user.Current(); err == nil {
|
||||
home = user.HomeDir
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(home, "AppData", "Ethash")
|
||||
}
|
||||
return filepath.Join(home, ".ethash")
|
||||
}
|
||||
|
||||
// cache wraps an ethash_light_t with some metadata
|
||||
// and automatic memory management.
|
||||
type cache struct {
|
||||
epoch uint64
|
||||
used time.Time
|
||||
test bool
|
||||
|
||||
gen sync.Once // ensures cache is only generated once.
|
||||
ptr *C.struct_ethash_light
|
||||
}
|
||||
|
||||
// generate creates the actual cache. it can be called from multiple
|
||||
// goroutines. the first call will generate the cache, subsequent
|
||||
// calls wait until it is generated.
|
||||
func (cache *cache) generate() {
|
||||
cache.gen.Do(func() {
|
||||
started := time.Now()
|
||||
seedHash := makeSeedHash(cache.epoch)
|
||||
glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
|
||||
size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
|
||||
if cache.test {
|
||||
size = cacheSizeForTesting
|
||||
}
|
||||
cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
|
||||
runtime.SetFinalizer(cache, freeCache)
|
||||
glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
|
||||
})
|
||||
}
|
||||
|
||||
func freeCache(cache *cache) {
|
||||
C.ethash_light_delete(cache.ptr)
|
||||
cache.ptr = nil
|
||||
}
|
||||
|
||||
func (cache *cache) compute(dagSize uint64, hash common.Hash, nonce uint64) (ok bool, mixDigest, result common.Hash) {
|
||||
ret := C.ethash_light_compute_internal(cache.ptr, C.uint64_t(dagSize), hashToH256(hash), C.uint64_t(nonce))
|
||||
// Make sure cache is live until after the C call.
|
||||
// This is important because a GC might happen and execute
|
||||
// the finalizer before the call completes.
|
||||
_ = cache
|
||||
return bool(ret.success), h256ToHash(ret.mix_hash), h256ToHash(ret.result)
|
||||
}
|
||||
|
||||
// Light implements the Verify half of the proof of work. It uses a few small
|
||||
// in-memory caches to verify the nonces found by Full.
|
||||
type Light struct {
|
||||
test bool // If set, use a smaller cache size
|
||||
|
||||
mu sync.Mutex // Protects the per-epoch map of verification caches
|
||||
caches map[uint64]*cache // Currently maintained verification caches
|
||||
future *cache // Pre-generated cache for the estimated future DAG
|
||||
|
||||
NumCaches int // Maximum number of caches to keep before eviction (only init, don't modify)
|
||||
}
|
||||
|
||||
// Verify checks whether the block's nonce is valid.
|
||||
func (l *Light) Verify(block pow.Block) bool {
|
||||
// TODO: do ethash_quick_verify before getCache in order
|
||||
// to prevent DOS attacks.
|
||||
blockNum := block.NumberU64()
|
||||
if blockNum >= epochLength*2048 {
|
||||
glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048)
|
||||
return false
|
||||
}
|
||||
|
||||
difficulty := block.Difficulty()
|
||||
/* Cannot happen if block header diff is validated prior to PoW, but can
|
||||
happen if PoW is checked first due to parallel PoW checking.
|
||||
We could check the minimum valid difficulty but for SoC we avoid (duplicating)
|
||||
Ethereum protocol consensus rules here which are not in scope of Ethash
|
||||
*/
|
||||
if difficulty.Cmp(common.Big0) == 0 {
|
||||
glog.V(logger.Debug).Infof("invalid block difficulty")
|
||||
return false
|
||||
}
|
||||
|
||||
cache := l.getCache(blockNum)
|
||||
dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
|
||||
if l.test {
|
||||
dagSize = dagSizeForTesting
|
||||
}
|
||||
// Recompute the hash using the cache.
|
||||
ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// avoid mixdigest malleability as it's not included in a block's "hashNononce"
|
||||
if block.MixDigest() != mixDigest {
|
||||
return false
|
||||
}
|
||||
|
||||
// The actual check.
|
||||
target := new(big.Int).Div(maxUint256, difficulty)
|
||||
return result.Big().Cmp(target) <= 0
|
||||
}
|
||||
|
||||
func h256ToHash(in C.ethash_h256_t) common.Hash {
|
||||
return *(*common.Hash)(unsafe.Pointer(&in.b))
|
||||
}
|
||||
|
||||
func hashToH256(in common.Hash) C.ethash_h256_t {
|
||||
return C.ethash_h256_t{b: *(*[32]C.uint8_t)(unsafe.Pointer(&in[0]))}
|
||||
}
|
||||
|
||||
func (l *Light) getCache(blockNum uint64) *cache {
|
||||
var c *cache
|
||||
epoch := blockNum / epochLength
|
||||
|
||||
// If we have a PoW for that epoch, use that
|
||||
l.mu.Lock()
|
||||
if l.caches == nil {
|
||||
l.caches = make(map[uint64]*cache)
|
||||
}
|
||||
if l.NumCaches == 0 {
|
||||
l.NumCaches = 3
|
||||
}
|
||||
c = l.caches[epoch]
|
||||
if c == nil {
|
||||
// No cached DAG, evict the oldest if the cache limit was reached
|
||||
if len(l.caches) >= l.NumCaches {
|
||||
var evict *cache
|
||||
for _, cache := range l.caches {
|
||||
if evict == nil || evict.used.After(cache.used) {
|
||||
evict = cache
|
||||
}
|
||||
}
|
||||
glog.V(logger.Debug).Infof("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch)
|
||||
delete(l.caches, evict.epoch)
|
||||
}
|
||||
// If we have the new DAG pre-generated, use that, otherwise create a new one
|
||||
if l.future != nil && l.future.epoch == epoch {
|
||||
glog.V(logger.Debug).Infof("Using pre-generated DAG for epoch %d", epoch)
|
||||
c, l.future = l.future, nil
|
||||
} else {
|
||||
glog.V(logger.Debug).Infof("No pre-generated DAG available, creating new for epoch %d", epoch)
|
||||
c = &cache{epoch: epoch, test: l.test}
|
||||
}
|
||||
l.caches[epoch] = c
|
||||
|
||||
// If we just used up the future cache, or need a refresh, regenerate
|
||||
if l.future == nil || l.future.epoch <= epoch {
|
||||
glog.V(logger.Debug).Infof("Pre-generating DAG for epoch %d", epoch+1)
|
||||
l.future = &cache{epoch: epoch + 1, test: l.test}
|
||||
go l.future.generate()
|
||||
}
|
||||
}
|
||||
c.used = time.Now()
|
||||
l.mu.Unlock()
|
||||
|
||||
// Wait for generation finish and return the cache
|
||||
c.generate()
|
||||
return c
|
||||
}
|
||||
|
||||
// dag wraps an ethash_full_t with some metadata
|
||||
// and automatic memory management.
|
||||
type dag struct {
|
||||
epoch uint64
|
||||
test bool
|
||||
dir string
|
||||
|
||||
gen sync.Once // ensures DAG is only generated once.
|
||||
ptr *C.struct_ethash_full
|
||||
}
|
||||
|
||||
// generate creates the actual DAG. it can be called from multiple
|
||||
// goroutines. the first call will generate the DAG, subsequent
|
||||
// calls wait until it is generated.
|
||||
func (d *dag) generate() {
|
||||
d.gen.Do(func() {
|
||||
var (
|
||||
started = time.Now()
|
||||
seedHash = makeSeedHash(d.epoch)
|
||||
blockNum = C.uint64_t(d.epoch * epochLength)
|
||||
cacheSize = C.ethash_get_cachesize(blockNum)
|
||||
dagSize = C.ethash_get_datasize(blockNum)
|
||||
)
|
||||
if d.test {
|
||||
cacheSize = cacheSizeForTesting
|
||||
dagSize = dagSizeForTesting
|
||||
}
|
||||
if d.dir == "" {
|
||||
d.dir = DefaultDir
|
||||
}
|
||||
glog.V(logger.Info).Infof("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash)
|
||||
// Generate a temporary cache.
|
||||
// TODO: this could share the cache with Light
|
||||
cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
|
||||
defer C.ethash_light_delete(cache)
|
||||
// Generate the actual DAG.
|
||||
d.ptr = C.ethash_full_new_internal(
|
||||
C.CString(d.dir),
|
||||
hashToH256(seedHash),
|
||||
dagSize,
|
||||
cache,
|
||||
(C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
|
||||
)
|
||||
if d.ptr == nil {
|
||||
panic("ethash_full_new IO or memory error")
|
||||
}
|
||||
runtime.SetFinalizer(d, freeDAG)
|
||||
glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
|
||||
})
|
||||
}
|
||||
|
||||
func freeDAG(d *dag) {
|
||||
C.ethash_full_delete(d.ptr)
|
||||
d.ptr = nil
|
||||
}
|
||||
|
||||
func (d *dag) Ptr() unsafe.Pointer {
|
||||
return unsafe.Pointer(d.ptr.data)
|
||||
}
|
||||
|
||||
//export ethashGoCallback
|
||||
func ethashGoCallback(percent C.unsigned) C.int {
|
||||
glog.V(logger.Info).Infof("Generating DAG: %d%%", percent)
|
||||
return 0
|
||||
}
|
||||
|
||||
// MakeDAG pre-generates a DAG file for the given block number in the
|
||||
// given directory. If dir is the empty string, the default directory
|
||||
// is used.
|
||||
func MakeDAG(blockNum uint64, dir string) error {
|
||||
d := &dag{epoch: blockNum / epochLength, dir: dir}
|
||||
if blockNum >= epochLength*2048 {
|
||||
return fmt.Errorf("block number too high, limit is %d", epochLength*2048)
|
||||
}
|
||||
d.generate()
|
||||
if d.ptr == nil {
|
||||
return errors.New("failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Full implements the Search half of the proof of work.
|
||||
type Full struct {
|
||||
Dir string // use this to specify a non-default DAG directory
|
||||
|
||||
test bool // if set use a smaller DAG size
|
||||
turbo bool
|
||||
hashRate int32
|
||||
|
||||
mu sync.Mutex // protects dag
|
||||
current *dag // current full DAG
|
||||
}
|
||||
|
||||
func (pow *Full) getDAG(blockNum uint64) (d *dag) {
|
||||
epoch := blockNum / epochLength
|
||||
pow.mu.Lock()
|
||||
if pow.current != nil && pow.current.epoch == epoch {
|
||||
d = pow.current
|
||||
} else {
|
||||
d = &dag{epoch: epoch, test: pow.test, dir: pow.Dir}
|
||||
pow.current = d
|
||||
}
|
||||
pow.mu.Unlock()
|
||||
// wait for it to finish generating.
|
||||
d.generate()
|
||||
return d
|
||||
}
|
||||
|
||||
func (pow *Full) Search(block pow.Block, stop <-chan struct{}, index int) (nonce uint64, mixDigest []byte) {
|
||||
dag := pow.getDAG(block.NumberU64())
|
||||
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
diff := block.Difficulty()
|
||||
|
||||
i := int64(0)
|
||||
starti := i
|
||||
start := time.Now().UnixNano()
|
||||
previousHashrate := int32(0)
|
||||
|
||||
nonce = uint64(r.Int63())
|
||||
hash := hashToH256(block.HashNoNonce())
|
||||
target := new(big.Int).Div(maxUint256, diff)
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
atomic.AddInt32(&pow.hashRate, -previousHashrate)
|
||||
return 0, nil
|
||||
default:
|
||||
i++
|
||||
|
||||
// we don't have to update hash rate on every nonce, so update after
|
||||
// first nonce check and then after 2^X nonces
|
||||
if i == 2 || ((i % (1 << 16)) == 0) {
|
||||
elapsed := time.Now().UnixNano() - start
|
||||
hashes := (float64(1e9) / float64(elapsed)) * float64(i-starti)
|
||||
hashrateDiff := int32(hashes) - previousHashrate
|
||||
previousHashrate = int32(hashes)
|
||||
atomic.AddInt32(&pow.hashRate, hashrateDiff)
|
||||
}
|
||||
|
||||
ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
|
||||
result := h256ToHash(ret.result).Big()
|
||||
|
||||
// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
|
||||
if ret.success && result.Cmp(target) <= 0 {
|
||||
mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
|
||||
atomic.AddInt32(&pow.hashRate, -previousHashrate)
|
||||
return nonce, mixDigest
|
||||
}
|
||||
nonce += 1
|
||||
}
|
||||
|
||||
if !pow.turbo {
|
||||
time.Sleep(20 * time.Microsecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pow *Full) GetHashrate() int64 {
|
||||
return int64(atomic.LoadInt32(&pow.hashRate))
|
||||
}
|
||||
|
||||
func (pow *Full) Turbo(on bool) {
|
||||
// TODO: this needs to use an atomic operation.
|
||||
pow.turbo = on
|
||||
}
|
||||
|
||||
// Ethash combines block verification with Light and
|
||||
// nonce searching with Full into a single proof of work.
|
||||
type Ethash struct {
|
||||
*Light
|
||||
*Full
|
||||
}
|
||||
|
||||
// New creates an instance of the proof of work.
|
||||
func New() *Ethash {
|
||||
return &Ethash{new(Light), &Full{turbo: true}}
|
||||
}
|
||||
|
||||
// NewShared creates an instance of the proof of work., where a single instance
|
||||
// of the Light cache is shared across all instances created with NewShared.
|
||||
func NewShared() *Ethash {
|
||||
return &Ethash{sharedLight, &Full{turbo: true}}
|
||||
}
|
||||
|
||||
// NewForTesting creates a proof of work for use in unit tests.
|
||||
// It uses a smaller DAG and cache size to keep test times low.
|
||||
// DAG files are stored in a temporary directory.
|
||||
//
|
||||
// Nonces found by a testing instance are not verifiable with a
|
||||
// regular-size cache.
|
||||
func NewForTesting() (*Ethash, error) {
|
||||
dir, err := ioutil.TempDir("", "ethash-test")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Ethash{&Light{test: true}, &Full{Dir: dir, test: true}}, nil
|
||||
}
|
||||
|
||||
func GetSeedHash(blockNum uint64) ([]byte, error) {
|
||||
if blockNum >= epochLength*2048 {
|
||||
return nil, fmt.Errorf("block number too high, limit is %d", epochLength*2048)
|
||||
}
|
||||
sh := makeSeedHash(blockNum / epochLength)
|
||||
return sh[:], nil
|
||||
}
|
||||
|
||||
func makeSeedHash(epoch uint64) (sh common.Hash) {
|
||||
for ; epoch > 0; epoch-- {
|
||||
sh = crypto.Sha3Hash(sh[:])
|
||||
}
|
||||
return sh
|
||||
}
|
|
@ -1,628 +0,0 @@
|
|||
// Copyright 2014 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build opencl
|
||||
|
||||
package ethash
|
||||
|
||||
//#cgo LDFLAGS: -w
|
||||
//#include <stdint.h>
|
||||
//#include <string.h>
|
||||
//#include "src/libethash/internal.h"
|
||||
import "C"
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
mrand "math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Gustav-Simonsson/go-opencl/cl"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
This code has two main entry points:
|
||||
|
||||
1. The initCL(...) function configures one or more OpenCL device
|
||||
(for now only GPU) and loads the Ethash DAG onto device memory
|
||||
|
||||
2. The Search(...) function loads a Ethash nonce into device(s) memory and
|
||||
executes the Ethash OpenCL kernel.
|
||||
|
||||
Throughout the code, we refer to "host memory" and "device memory".
|
||||
For most systems (e.g. regular PC GPU miner) the host memory is RAM and
|
||||
device memory is the GPU global memory (e.g. GDDR5).
|
||||
|
||||
References mentioned in code comments:
|
||||
|
||||
1. https://github.com/ethereum/wiki/wiki/Ethash
|
||||
2. https://github.com/ethereum/cpp-ethereum/blob/develop/libethash-cl/ethash_cl_miner.cpp
|
||||
3. https://www.khronos.org/registry/cl/sdk/1.2/docs/man/xhtml/
|
||||
4. http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_OpenCL_Programming_User_Guide.pdf
|
||||
|
||||
*/
|
||||
|
||||
type OpenCLDevice struct {
|
||||
deviceId int
|
||||
device *cl.Device
|
||||
openCL11 bool // OpenCL version 1.1 and 1.2 are handled a bit different
|
||||
openCL12 bool
|
||||
|
||||
dagBuf *cl.MemObject // Ethash full DAG in device mem
|
||||
headerBuf *cl.MemObject // Hash of block-to-mine in device mem
|
||||
searchBuffers []*cl.MemObject
|
||||
|
||||
searchKernel *cl.Kernel
|
||||
hashKernel *cl.Kernel
|
||||
|
||||
queue *cl.CommandQueue
|
||||
ctx *cl.Context
|
||||
workGroupSize int
|
||||
|
||||
nonceRand *mrand.Rand // seeded by crypto/rand, see comments where it's initialised
|
||||
result common.Hash
|
||||
}
|
||||
|
||||
type OpenCLMiner struct {
|
||||
mu sync.Mutex
|
||||
|
||||
ethash *Ethash // Ethash full DAG & cache in host mem
|
||||
|
||||
deviceIds []int
|
||||
devices []*OpenCLDevice
|
||||
|
||||
dagSize uint64
|
||||
|
||||
hashRate int32 // Go atomics & uint64 have some issues; int32 is supported on all platforms
|
||||
}
|
||||
|
||||
type pendingSearch struct {
|
||||
bufIndex uint32
|
||||
startNonce uint64
|
||||
}
|
||||
|
||||
const (
|
||||
SIZEOF_UINT32 = 4
|
||||
|
||||
// See [1]
|
||||
ethashMixBytesLen = 128
|
||||
ethashAccesses = 64
|
||||
|
||||
// See [4]
|
||||
workGroupSize = 32 // must be multiple of 8
|
||||
maxSearchResults = 63
|
||||
searchBufSize = 2
|
||||
globalWorkSize = 1024 * 256
|
||||
)
|
||||
|
||||
func NewCL(deviceIds []int) *OpenCLMiner {
|
||||
ids := make([]int, len(deviceIds))
|
||||
copy(ids, deviceIds)
|
||||
return &OpenCLMiner{
|
||||
ethash: New(),
|
||||
dagSize: 0, // to see if we need to update DAG.
|
||||
deviceIds: ids,
|
||||
}
|
||||
}
|
||||
|
||||
func PrintDevices() {
|
||||
fmt.Println("=============================================")
|
||||
fmt.Println("============ OpenCL Device Info =============")
|
||||
fmt.Println("=============================================")
|
||||
|
||||
var found []*cl.Device
|
||||
|
||||
platforms, err := cl.GetPlatforms()
|
||||
if err != nil {
|
||||
fmt.Println("Plaform error (check your OpenCL installation):", err)
|
||||
return
|
||||
}
|
||||
|
||||
for i, p := range platforms {
|
||||
fmt.Println("Platform id ", i)
|
||||
fmt.Println("Platform Name ", p.Name())
|
||||
fmt.Println("Platform Vendor ", p.Vendor())
|
||||
fmt.Println("Platform Version ", p.Version())
|
||||
fmt.Println("Platform Extensions ", p.Extensions())
|
||||
fmt.Println("Platform Profile ", p.Profile())
|
||||
fmt.Println("")
|
||||
|
||||
devices, err := cl.GetDevices(p, cl.DeviceTypeGPU)
|
||||
if err != nil {
|
||||
fmt.Println("Device error (check your GPU drivers) :", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, d := range devices {
|
||||
fmt.Println("Device OpenCL id ", i)
|
||||
fmt.Println("Device id for mining ", len(found))
|
||||
fmt.Println("Device Name ", d.Name())
|
||||
fmt.Println("Vendor ", d.Vendor())
|
||||
fmt.Println("Version ", d.Version())
|
||||
fmt.Println("Driver version ", d.DriverVersion())
|
||||
fmt.Println("Address bits ", d.AddressBits())
|
||||
fmt.Println("Max clock freq ", d.MaxClockFrequency())
|
||||
fmt.Println("Global mem size ", d.GlobalMemSize())
|
||||
fmt.Println("Max constant buffer size", d.MaxConstantBufferSize())
|
||||
fmt.Println("Max mem alloc size ", d.MaxMemAllocSize())
|
||||
fmt.Println("Max compute units ", d.MaxComputeUnits())
|
||||
fmt.Println("Max work group size ", d.MaxWorkGroupSize())
|
||||
fmt.Println("Max work item sizes ", d.MaxWorkItemSizes())
|
||||
fmt.Println("=============================================")
|
||||
|
||||
found = append(found, d)
|
||||
}
|
||||
}
|
||||
if len(found) == 0 {
|
||||
fmt.Println("Found no GPU(s). Check that your OS can see the GPU(s)")
|
||||
} else {
|
||||
var idsFormat string
|
||||
for i := 0; i < len(found); i++ {
|
||||
idsFormat += strconv.Itoa(i)
|
||||
if i != len(found)-1 {
|
||||
idsFormat += ","
|
||||
}
|
||||
}
|
||||
fmt.Printf("Found %v devices. Benchmark first GPU: geth gpubench 0\n", len(found))
|
||||
fmt.Printf("Mine using all GPUs: geth --minegpu %v\n", idsFormat)
|
||||
}
|
||||
}
|
||||
|
||||
// See [2]. We basically do the same here, but the Go OpenCL bindings
|
||||
// are at a slightly higher abstraction level.
|
||||
func InitCL(blockNum uint64, c *OpenCLMiner) error {
|
||||
platforms, err := cl.GetPlatforms()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Plaform error: %v\nCheck your OpenCL installation and then run geth gpuinfo", err)
|
||||
}
|
||||
|
||||
var devices []*cl.Device
|
||||
for _, p := range platforms {
|
||||
ds, err := cl.GetDevices(p, cl.DeviceTypeGPU)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Devices error: %v\nCheck your GPU drivers and then run geth gpuinfo", err)
|
||||
}
|
||||
for _, d := range ds {
|
||||
devices = append(devices, d)
|
||||
}
|
||||
}
|
||||
|
||||
pow := New()
|
||||
_ = pow.getDAG(blockNum) // generates DAG if we don't have it
|
||||
pow.Light.getCache(blockNum) // and cache
|
||||
|
||||
c.ethash = pow
|
||||
dagSize := uint64(C.ethash_get_datasize(C.uint64_t(blockNum)))
|
||||
c.dagSize = dagSize
|
||||
|
||||
for _, id := range c.deviceIds {
|
||||
if id > len(devices)-1 {
|
||||
return fmt.Errorf("Device id not found. See available device ids with: geth gpuinfo")
|
||||
} else {
|
||||
err := initCLDevice(id, devices[id], c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(c.devices) == 0 {
|
||||
return fmt.Errorf("No GPU devices found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error {
|
||||
devMaxAlloc := uint64(device.MaxMemAllocSize())
|
||||
devGlobalMem := uint64(device.GlobalMemSize())
|
||||
|
||||
// TODO: more fine grained version logic
|
||||
if device.Version() == "OpenCL 1.0" {
|
||||
fmt.Println("Device OpenCL version not supported: ", device.Version())
|
||||
return fmt.Errorf("opencl version not supported")
|
||||
}
|
||||
|
||||
var cl11, cl12 bool
|
||||
if device.Version() == "OpenCL 1.1" {
|
||||
cl11 = true
|
||||
}
|
||||
if device.Version() == "OpenCL 1.2" {
|
||||
cl12 = true
|
||||
}
|
||||
|
||||
// log warnings but carry on; some device drivers report inaccurate values
|
||||
if c.dagSize > devGlobalMem {
|
||||
fmt.Printf("WARNING: device memory may be insufficient: %v. DAG size: %v.\n", devGlobalMem, c.dagSize)
|
||||
}
|
||||
|
||||
if c.dagSize > devMaxAlloc {
|
||||
fmt.Printf("WARNING: DAG size (%v) larger than device max memory allocation size (%v).\n", c.dagSize, devMaxAlloc)
|
||||
fmt.Printf("You probably have to export GPU_MAX_ALLOC_PERCENT=95\n")
|
||||
}
|
||||
|
||||
fmt.Printf("Initialising device %v: %v\n", deviceId, device.Name())
|
||||
|
||||
context, err := cl.CreateContext([]*cl.Device{device})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed creating context: %v", err)
|
||||
}
|
||||
|
||||
// TODO: test running with CL_QUEUE_PROFILING_ENABLE for profiling?
|
||||
queue, err := context.CreateCommandQueue(device, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("command queue err: %v", err)
|
||||
}
|
||||
|
||||
// See [4] section 3.2 and [3] "clBuildProgram".
|
||||
// The OpenCL kernel code is compiled at run-time.
|
||||
kvs := make(map[string]string, 4)
|
||||
kvs["GROUP_SIZE"] = strconv.FormatUint(workGroupSize, 10)
|
||||
kvs["DAG_SIZE"] = strconv.FormatUint(c.dagSize/ethashMixBytesLen, 10)
|
||||
kvs["ACCESSES"] = strconv.FormatUint(ethashAccesses, 10)
|
||||
kvs["MAX_OUTPUTS"] = strconv.FormatUint(maxSearchResults, 10)
|
||||
kernelCode := replaceWords(kernel, kvs)
|
||||
|
||||
program, err := context.CreateProgramWithSource([]string{kernelCode})
|
||||
if err != nil {
|
||||
return fmt.Errorf("program err: %v", err)
|
||||
}
|
||||
|
||||
/* if using AMD OpenCL impl, you can set this to debug on x86 CPU device.
|
||||
see AMD OpenCL programming guide section 4.2
|
||||
|
||||
export in shell before running:
|
||||
export AMD_OCL_BUILD_OPTIONS_APPEND="-g -O0"
|
||||
export CPU_MAX_COMPUTE_UNITS=1
|
||||
|
||||
buildOpts := "-g -cl-opt-disable"
|
||||
|
||||
*/
|
||||
buildOpts := ""
|
||||
err = program.BuildProgram([]*cl.Device{device}, buildOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("program build err: %v", err)
|
||||
}
|
||||
|
||||
var searchKernelName, hashKernelName string
|
||||
searchKernelName = "ethash_search"
|
||||
hashKernelName = "ethash_hash"
|
||||
|
||||
searchKernel, err := program.CreateKernel(searchKernelName)
|
||||
hashKernel, err := program.CreateKernel(hashKernelName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("kernel err: %v", err)
|
||||
}
|
||||
|
||||
// TODO: when this DAG size appears, patch the Go bindings
|
||||
// (context.go) to work with uint64 as size_t
|
||||
if c.dagSize > math.MaxInt32 {
|
||||
fmt.Println("DAG too large for allocation.")
|
||||
return fmt.Errorf("DAG too large for alloc")
|
||||
}
|
||||
|
||||
// TODO: patch up Go bindings to work with size_t, will overflow if > maxint32
|
||||
// TODO: fuck. shit's gonna overflow around 2017-06-09 12:17:02
|
||||
dagBuf := *(new(*cl.MemObject))
|
||||
dagBuf, err = context.CreateEmptyBuffer(cl.MemReadOnly, int(c.dagSize))
|
||||
if err != nil {
|
||||
return fmt.Errorf("allocating dag buf failed: %v", err)
|
||||
}
|
||||
|
||||
// write DAG to device mem
|
||||
dagPtr := unsafe.Pointer(c.ethash.Full.current.ptr.data)
|
||||
_, err = queue.EnqueueWriteBuffer(dagBuf, true, 0, int(c.dagSize), dagPtr, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing to dag buf failed: %v", err)
|
||||
}
|
||||
|
||||
searchBuffers := make([]*cl.MemObject, searchBufSize)
|
||||
for i := 0; i < searchBufSize; i++ {
|
||||
searchBuff, err := context.CreateEmptyBuffer(cl.MemWriteOnly, (1+maxSearchResults)*SIZEOF_UINT32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("search buffer err: %v", err)
|
||||
}
|
||||
searchBuffers[i] = searchBuff
|
||||
}
|
||||
|
||||
headerBuf, err := context.CreateEmptyBuffer(cl.MemReadOnly, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("header buffer err: %v", err)
|
||||
}
|
||||
|
||||
// Unique, random nonces are crucial for mining efficiency.
|
||||
// While we do not need cryptographically secure PRNG for nonces,
|
||||
// we want to have uniform distribution and minimal repetition of nonces.
|
||||
// We could guarantee strict uniqueness of nonces by generating unique ranges,
|
||||
// but a int64 seed from crypto/rand should be good enough.
|
||||
// we then use math/rand for speed and to avoid draining OS entropy pool
|
||||
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nonceRand := mrand.New(mrand.NewSource(seed.Int64()))
|
||||
|
||||
deviceStruct := &OpenCLDevice{
|
||||
deviceId: deviceId,
|
||||
device: device,
|
||||
openCL11: cl11,
|
||||
openCL12: cl12,
|
||||
|
||||
dagBuf: dagBuf,
|
||||
headerBuf: headerBuf,
|
||||
searchBuffers: searchBuffers,
|
||||
|
||||
searchKernel: searchKernel,
|
||||
hashKernel: hashKernel,
|
||||
|
||||
queue: queue,
|
||||
ctx: context,
|
||||
|
||||
workGroupSize: workGroupSize,
|
||||
|
||||
nonceRand: nonceRand,
|
||||
}
|
||||
c.devices = append(c.devices, deviceStruct)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *OpenCLMiner) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) {
|
||||
c.mu.Lock()
|
||||
newDagSize := uint64(C.ethash_get_datasize(C.uint64_t(block.NumberU64())))
|
||||
if newDagSize > c.dagSize {
|
||||
// TODO: clean up buffers from previous DAG?
|
||||
err := InitCL(block.NumberU64(), c)
|
||||
if err != nil {
|
||||
fmt.Println("OpenCL init error: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
defer c.mu.Unlock()
|
||||
|
||||
// Avoid unneeded OpenCL initialisation if we received stop while running InitCL
|
||||
select {
|
||||
case <-stop:
|
||||
return 0, []byte{0}
|
||||
default:
|
||||
}
|
||||
|
||||
headerHash := block.HashNoNonce()
|
||||
diff := block.Difficulty()
|
||||
target256 := new(big.Int).Div(maxUint256, diff)
|
||||
target64 := new(big.Int).Rsh(target256, 192).Uint64()
|
||||
var zero uint32 = 0
|
||||
|
||||
d := c.devices[index]
|
||||
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.headerBuf, false, 0, 32, unsafe.Pointer(&headerHash[0]), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
for i := 0; i < searchBufSize; i++ {
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[i], false, 0, 4, unsafe.Pointer(&zero), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
|
||||
// wait for all search buffers to complete
|
||||
err = d.queue.Finish()
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clFinish : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(1, d.headerBuf)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(2, d.dagBuf)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(4, target64)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
err = d.searchKernel.SetArg(5, uint32(math.MaxUint32))
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
// wait on this before returning
|
||||
var preReturnEvent *cl.Event
|
||||
if d.openCL12 {
|
||||
preReturnEvent, err = d.ctx.CreateUserEvent()
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search create CL user event : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
|
||||
pending := make([]pendingSearch, 0, searchBufSize)
|
||||
var p *pendingSearch
|
||||
searchBufIndex := uint32(0)
|
||||
var checkNonce uint64
|
||||
loops := int64(0)
|
||||
prevHashRate := int32(0)
|
||||
start := time.Now().UnixNano()
|
||||
// we grab a single random nonce and sets this as argument to the kernel search function
|
||||
// the device will then add each local threads gid to the nonce, creating a unique nonce
|
||||
// for each device computing unit executing in parallel
|
||||
initNonce := uint64(d.nonceRand.Int63())
|
||||
for nonce := initNonce; ; nonce += uint64(globalWorkSize) {
|
||||
select {
|
||||
case <-stop:
|
||||
|
||||
/*
|
||||
if d.openCL12 {
|
||||
err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search WaitForEvents: ", err)
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
atomic.AddInt32(&c.hashRate, -prevHashRate)
|
||||
return 0, []byte{0}
|
||||
default:
|
||||
}
|
||||
|
||||
if (loops % (1 << 7)) == 0 {
|
||||
elapsed := time.Now().UnixNano() - start
|
||||
// TODO: verify if this is correct hash rate calculation
|
||||
hashes := (float64(1e9) / float64(elapsed)) * float64(loops*1024*256)
|
||||
hashrateDiff := int32(hashes) - prevHashRate
|
||||
prevHashRate = int32(hashes)
|
||||
atomic.AddInt32(&c.hashRate, hashrateDiff)
|
||||
}
|
||||
loops++
|
||||
|
||||
err = d.searchKernel.SetArg(0, d.searchBuffers[searchBufIndex])
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
err = d.searchKernel.SetArg(3, nonce)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
// execute kernel
|
||||
_, err := d.queue.EnqueueNDRangeKernel(
|
||||
d.searchKernel,
|
||||
[]int{0},
|
||||
[]int{globalWorkSize},
|
||||
[]int{d.workGroupSize},
|
||||
nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueNDRangeKernel : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
pending = append(pending, pendingSearch{bufIndex: searchBufIndex, startNonce: nonce})
|
||||
searchBufIndex = (searchBufIndex + 1) % searchBufSize
|
||||
|
||||
if len(pending) == searchBufSize {
|
||||
p = &(pending[searchBufIndex])
|
||||
cres, _, err := d.queue.EnqueueMapBuffer(d.searchBuffers[p.bufIndex], true,
|
||||
cl.MapFlagRead, 0, (1+maxSearchResults)*SIZEOF_UINT32,
|
||||
nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueMapBuffer: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
results := cres.ByteSlice()
|
||||
nfound := binary.LittleEndian.Uint32(results)
|
||||
nfound = uint32(math.Min(float64(nfound), float64(maxSearchResults)))
|
||||
// OpenCL returns the offsets from the start nonce
|
||||
for i := uint32(0); i < nfound; i++ {
|
||||
lo := (i + 1) * SIZEOF_UINT32
|
||||
hi := (i + 2) * SIZEOF_UINT32
|
||||
upperNonce := uint64(binary.LittleEndian.Uint32(results[lo:hi]))
|
||||
checkNonce = p.startNonce + upperNonce
|
||||
if checkNonce != 0 {
|
||||
// We verify that the nonce is indeed a solution by
|
||||
// executing the Ethash verification function (on the CPU).
|
||||
cache := c.ethash.Light.getCache(block.NumberU64())
|
||||
ok, mixDigest, result := cache.compute(c.dagSize, headerHash, checkNonce)
|
||||
|
||||
// TODO: return result first
|
||||
if ok && result.Big().Cmp(target256) <= 0 {
|
||||
_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueUnmapMemObject: ", err)
|
||||
}
|
||||
if d.openCL12 {
|
||||
err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search WaitForEvents: ", err)
|
||||
}
|
||||
}
|
||||
return checkNonce, mixDigest.Bytes()
|
||||
}
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[p.bufIndex], false, 0, 4, unsafe.Pointer(&zero), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search cl: EnqueueWriteBuffer", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueUnMapMemObject: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
pending = append(pending[:searchBufIndex], pending[searchBufIndex+1:]...)
|
||||
}
|
||||
}
|
||||
if d.openCL12 {
|
||||
err := cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clWaitForEvents: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
func (c *OpenCLMiner) Verify(block pow.Block) bool {
|
||||
return c.ethash.Light.Verify(block)
|
||||
}
|
||||
func (c *OpenCLMiner) GetHashrate() int64 {
|
||||
return int64(atomic.LoadInt32(&c.hashRate))
|
||||
}
|
||||
func (c *OpenCLMiner) Turbo(on bool) {
|
||||
// This is GPU mining. Always be turbo.
|
||||
}
|
||||
|
||||
func replaceWords(text string, kvs map[string]string) string {
|
||||
for k, v := range kvs {
|
||||
text = strings.Replace(text, k, v, -1)
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
func logErr(err error) {
|
||||
if err != nil {
|
||||
fmt.Println("Error in OpenCL call:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func argErr(err error) error {
|
||||
return fmt.Errorf("arg err: %v", err)
|
||||
}
|
|
@ -1,600 +0,0 @@
|
|||
package ethash
|
||||
|
||||
/* DO NOT EDIT!!!
|
||||
|
||||
This code is version controlled at
|
||||
https://github.com/ethereum/cpp-ethereum/blob/develop/libethash-cl/ethash_cl_miner_kernel.cl
|
||||
|
||||
If needed change it there first, then copy over here.
|
||||
*/
|
||||
|
||||
const kernel = `
|
||||
// author Tim Hughes <tim@twistedfury.com>
|
||||
// Tested on Radeon HD 7850
|
||||
// Hashrate: 15940347 hashes/s
|
||||
// Bandwidth: 124533 MB/s
|
||||
// search kernel should fit in <= 84 VGPRS (3 wavefronts)
|
||||
|
||||
#define THREADS_PER_HASH (128 / 16)
|
||||
#define HASHES_PER_LOOP (GROUP_SIZE / THREADS_PER_HASH)
|
||||
|
||||
#define FNV_PRIME 0x01000193
|
||||
|
||||
__constant uint2 const Keccak_f1600_RC[24] = {
|
||||
(uint2)(0x00000001, 0x00000000),
|
||||
(uint2)(0x00008082, 0x00000000),
|
||||
(uint2)(0x0000808a, 0x80000000),
|
||||
(uint2)(0x80008000, 0x80000000),
|
||||
(uint2)(0x0000808b, 0x00000000),
|
||||
(uint2)(0x80000001, 0x00000000),
|
||||
(uint2)(0x80008081, 0x80000000),
|
||||
(uint2)(0x00008009, 0x80000000),
|
||||
(uint2)(0x0000008a, 0x00000000),
|
||||
(uint2)(0x00000088, 0x00000000),
|
||||
(uint2)(0x80008009, 0x00000000),
|
||||
(uint2)(0x8000000a, 0x00000000),
|
||||
(uint2)(0x8000808b, 0x00000000),
|
||||
(uint2)(0x0000008b, 0x80000000),
|
||||
(uint2)(0x00008089, 0x80000000),
|
||||
(uint2)(0x00008003, 0x80000000),
|
||||
(uint2)(0x00008002, 0x80000000),
|
||||
(uint2)(0x00000080, 0x80000000),
|
||||
(uint2)(0x0000800a, 0x00000000),
|
||||
(uint2)(0x8000000a, 0x80000000),
|
||||
(uint2)(0x80008081, 0x80000000),
|
||||
(uint2)(0x00008080, 0x80000000),
|
||||
(uint2)(0x80000001, 0x00000000),
|
||||
(uint2)(0x80008008, 0x80000000),
|
||||
};
|
||||
|
||||
void keccak_f1600_round(uint2* a, uint r, uint out_size)
|
||||
{
|
||||
#if !__ENDIAN_LITTLE__
|
||||
for (uint i = 0; i != 25; ++i)
|
||||
a[i] = a[i].yx;
|
||||
#endif
|
||||
|
||||
uint2 b[25];
|
||||
uint2 t;
|
||||
|
||||
// Theta
|
||||
b[0] = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20];
|
||||
b[1] = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21];
|
||||
b[2] = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22];
|
||||
b[3] = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23];
|
||||
b[4] = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24];
|
||||
t = b[4] ^ (uint2)(b[1].x << 1 | b[1].y >> 31, b[1].y << 1 | b[1].x >> 31);
|
||||
a[0] ^= t;
|
||||
a[5] ^= t;
|
||||
a[10] ^= t;
|
||||
a[15] ^= t;
|
||||
a[20] ^= t;
|
||||
t = b[0] ^ (uint2)(b[2].x << 1 | b[2].y >> 31, b[2].y << 1 | b[2].x >> 31);
|
||||
a[1] ^= t;
|
||||
a[6] ^= t;
|
||||
a[11] ^= t;
|
||||
a[16] ^= t;
|
||||
a[21] ^= t;
|
||||
t = b[1] ^ (uint2)(b[3].x << 1 | b[3].y >> 31, b[3].y << 1 | b[3].x >> 31);
|
||||
a[2] ^= t;
|
||||
a[7] ^= t;
|
||||
a[12] ^= t;
|
||||
a[17] ^= t;
|
||||
a[22] ^= t;
|
||||
t = b[2] ^ (uint2)(b[4].x << 1 | b[4].y >> 31, b[4].y << 1 | b[4].x >> 31);
|
||||
a[3] ^= t;
|
||||
a[8] ^= t;
|
||||
a[13] ^= t;
|
||||
a[18] ^= t;
|
||||
a[23] ^= t;
|
||||
t = b[3] ^ (uint2)(b[0].x << 1 | b[0].y >> 31, b[0].y << 1 | b[0].x >> 31);
|
||||
a[4] ^= t;
|
||||
a[9] ^= t;
|
||||
a[14] ^= t;
|
||||
a[19] ^= t;
|
||||
a[24] ^= t;
|
||||
|
||||
// Rho Pi
|
||||
b[0] = a[0];
|
||||
b[10] = (uint2)(a[1].x << 1 | a[1].y >> 31, a[1].y << 1 | a[1].x >> 31);
|
||||
b[7] = (uint2)(a[10].x << 3 | a[10].y >> 29, a[10].y << 3 | a[10].x >> 29);
|
||||
b[11] = (uint2)(a[7].x << 6 | a[7].y >> 26, a[7].y << 6 | a[7].x >> 26);
|
||||
b[17] = (uint2)(a[11].x << 10 | a[11].y >> 22, a[11].y << 10 | a[11].x >> 22);
|
||||
b[18] = (uint2)(a[17].x << 15 | a[17].y >> 17, a[17].y << 15 | a[17].x >> 17);
|
||||
b[3] = (uint2)(a[18].x << 21 | a[18].y >> 11, a[18].y << 21 | a[18].x >> 11);
|
||||
b[5] = (uint2)(a[3].x << 28 | a[3].y >> 4, a[3].y << 28 | a[3].x >> 4);
|
||||
b[16] = (uint2)(a[5].y << 4 | a[5].x >> 28, a[5].x << 4 | a[5].y >> 28);
|
||||
b[8] = (uint2)(a[16].y << 13 | a[16].x >> 19, a[16].x << 13 | a[16].y >> 19);
|
||||
b[21] = (uint2)(a[8].y << 23 | a[8].x >> 9, a[8].x << 23 | a[8].y >> 9);
|
||||
b[24] = (uint2)(a[21].x << 2 | a[21].y >> 30, a[21].y << 2 | a[21].x >> 30);
|
||||
b[4] = (uint2)(a[24].x << 14 | a[24].y >> 18, a[24].y << 14 | a[24].x >> 18);
|
||||
b[15] = (uint2)(a[4].x << 27 | a[4].y >> 5, a[4].y << 27 | a[4].x >> 5);
|
||||
b[23] = (uint2)(a[15].y << 9 | a[15].x >> 23, a[15].x << 9 | a[15].y >> 23);
|
||||
b[19] = (uint2)(a[23].y << 24 | a[23].x >> 8, a[23].x << 24 | a[23].y >> 8);
|
||||
b[13] = (uint2)(a[19].x << 8 | a[19].y >> 24, a[19].y << 8 | a[19].x >> 24);
|
||||
b[12] = (uint2)(a[13].x << 25 | a[13].y >> 7, a[13].y << 25 | a[13].x >> 7);
|
||||
b[2] = (uint2)(a[12].y << 11 | a[12].x >> 21, a[12].x << 11 | a[12].y >> 21);
|
||||
b[20] = (uint2)(a[2].y << 30 | a[2].x >> 2, a[2].x << 30 | a[2].y >> 2);
|
||||
b[14] = (uint2)(a[20].x << 18 | a[20].y >> 14, a[20].y << 18 | a[20].x >> 14);
|
||||
b[22] = (uint2)(a[14].y << 7 | a[14].x >> 25, a[14].x << 7 | a[14].y >> 25);
|
||||
b[9] = (uint2)(a[22].y << 29 | a[22].x >> 3, a[22].x << 29 | a[22].y >> 3);
|
||||
b[6] = (uint2)(a[9].x << 20 | a[9].y >> 12, a[9].y << 20 | a[9].x >> 12);
|
||||
b[1] = (uint2)(a[6].y << 12 | a[6].x >> 20, a[6].x << 12 | a[6].y >> 20);
|
||||
|
||||
// Chi
|
||||
a[0] = bitselect(b[0] ^ b[2], b[0], b[1]);
|
||||
a[1] = bitselect(b[1] ^ b[3], b[1], b[2]);
|
||||
a[2] = bitselect(b[2] ^ b[4], b[2], b[3]);
|
||||
a[3] = bitselect(b[3] ^ b[0], b[3], b[4]);
|
||||
if (out_size >= 4)
|
||||
{
|
||||
a[4] = bitselect(b[4] ^ b[1], b[4], b[0]);
|
||||
a[5] = bitselect(b[5] ^ b[7], b[5], b[6]);
|
||||
a[6] = bitselect(b[6] ^ b[8], b[6], b[7]);
|
||||
a[7] = bitselect(b[7] ^ b[9], b[7], b[8]);
|
||||
a[8] = bitselect(b[8] ^ b[5], b[8], b[9]);
|
||||
if (out_size >= 8)
|
||||
{
|
||||
a[9] = bitselect(b[9] ^ b[6], b[9], b[5]);
|
||||
a[10] = bitselect(b[10] ^ b[12], b[10], b[11]);
|
||||
a[11] = bitselect(b[11] ^ b[13], b[11], b[12]);
|
||||
a[12] = bitselect(b[12] ^ b[14], b[12], b[13]);
|
||||
a[13] = bitselect(b[13] ^ b[10], b[13], b[14]);
|
||||
a[14] = bitselect(b[14] ^ b[11], b[14], b[10]);
|
||||
a[15] = bitselect(b[15] ^ b[17], b[15], b[16]);
|
||||
a[16] = bitselect(b[16] ^ b[18], b[16], b[17]);
|
||||
a[17] = bitselect(b[17] ^ b[19], b[17], b[18]);
|
||||
a[18] = bitselect(b[18] ^ b[15], b[18], b[19]);
|
||||
a[19] = bitselect(b[19] ^ b[16], b[19], b[15]);
|
||||
a[20] = bitselect(b[20] ^ b[22], b[20], b[21]);
|
||||
a[21] = bitselect(b[21] ^ b[23], b[21], b[22]);
|
||||
a[22] = bitselect(b[22] ^ b[24], b[22], b[23]);
|
||||
a[23] = bitselect(b[23] ^ b[20], b[23], b[24]);
|
||||
a[24] = bitselect(b[24] ^ b[21], b[24], b[20]);
|
||||
}
|
||||
}
|
||||
|
||||
// Iota
|
||||
a[0] ^= Keccak_f1600_RC[r];
|
||||
|
||||
#if !__ENDIAN_LITTLE__
|
||||
for (uint i = 0; i != 25; ++i)
|
||||
a[i] = a[i].yx;
|
||||
#endif
|
||||
}
|
||||
|
||||
void keccak_f1600_no_absorb(ulong* a, uint in_size, uint out_size, uint isolate)
|
||||
{
|
||||
for (uint i = in_size; i != 25; ++i)
|
||||
{
|
||||
a[i] = 0;
|
||||
}
|
||||
#if __ENDIAN_LITTLE__
|
||||
a[in_size] ^= 0x0000000000000001;
|
||||
a[24-out_size*2] ^= 0x8000000000000000;
|
||||
#else
|
||||
a[in_size] ^= 0x0100000000000000;
|
||||
a[24-out_size*2] ^= 0x0000000000000080;
|
||||
#endif
|
||||
|
||||
// Originally I unrolled the first and last rounds to interface
|
||||
// better with surrounding code, however I haven't done this
|
||||
// without causing the AMD compiler to blow up the VGPR usage.
|
||||
uint r = 0;
|
||||
do
|
||||
{
|
||||
// This dynamic branch stops the AMD compiler unrolling the loop
|
||||
// and additionally saves about 33% of the VGPRs, enough to gain another
|
||||
// wavefront. Ideally we'd get 4 in flight, but 3 is the best I can
|
||||
// massage out of the compiler. It doesn't really seem to matter how
|
||||
// much we try and help the compiler save VGPRs because it seems to throw
|
||||
// that information away, hence the implementation of keccak here
|
||||
// doesn't bother.
|
||||
if (isolate)
|
||||
{
|
||||
keccak_f1600_round((uint2*)a, r++, 25);
|
||||
}
|
||||
}
|
||||
while (r < 23);
|
||||
|
||||
// final round optimised for digest size
|
||||
keccak_f1600_round((uint2*)a, r++, out_size);
|
||||
}
|
||||
|
||||
#define copy(dst, src, count) for (uint i = 0; i != count; ++i) { (dst)[i] = (src)[i]; }
|
||||
|
||||
#define countof(x) (sizeof(x) / sizeof(x[0]))
|
||||
|
||||
uint fnv(uint x, uint y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
|
||||
|
||||
uint4 fnv4(uint4 x, uint4 y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
|
||||
|
||||
uint fnv_reduce(uint4 v)
|
||||
{
|
||||
return fnv(fnv(fnv(v.x, v.y), v.z), v.w);
|
||||
}
|
||||
|
||||
typedef union
|
||||
{
|
||||
ulong ulongs[32 / sizeof(ulong)];
|
||||
uint uints[32 / sizeof(uint)];
|
||||
} hash32_t;
|
||||
|
||||
typedef union
|
||||
{
|
||||
ulong ulongs[64 / sizeof(ulong)];
|
||||
uint4 uint4s[64 / sizeof(uint4)];
|
||||
} hash64_t;
|
||||
|
||||
typedef union
|
||||
{
|
||||
uint uints[128 / sizeof(uint)];
|
||||
uint4 uint4s[128 / sizeof(uint4)];
|
||||
} hash128_t;
|
||||
|
||||
hash64_t init_hash(__constant hash32_t const* header, ulong nonce, uint isolate)
|
||||
{
|
||||
hash64_t init;
|
||||
uint const init_size = countof(init.ulongs);
|
||||
uint const hash_size = countof(header->ulongs);
|
||||
|
||||
// sha3_512(header .. nonce)
|
||||
ulong state[25];
|
||||
copy(state, header->ulongs, hash_size);
|
||||
state[hash_size] = nonce;
|
||||
keccak_f1600_no_absorb(state, hash_size + 1, init_size, isolate);
|
||||
|
||||
copy(init.ulongs, state, init_size);
|
||||
return init;
|
||||
}
|
||||
|
||||
uint inner_loop_chunks(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, __global hash128_t const* g_dag1, __global hash128_t const* g_dag2, __global hash128_t const* g_dag3, uint isolate)
|
||||
{
|
||||
uint4 mix = init;
|
||||
|
||||
// share init0
|
||||
if (thread_id == 0)
|
||||
*share = mix.x;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
uint init0 = *share;
|
||||
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
bool update_share = thread_id == (a/4) % THREADS_PER_HASH;
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != 4; ++i)
|
||||
{
|
||||
if (update_share)
|
||||
{
|
||||
uint m[4] = { mix.x, mix.y, mix.z, mix.w };
|
||||
*share = fnv(init0 ^ (a+i), m[i]) % DAG_SIZE;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
mix = fnv4(mix, *share>=3 * DAG_SIZE / 4 ? g_dag3[*share - 3 * DAG_SIZE / 4].uint4s[thread_id] : *share>=DAG_SIZE / 2 ? g_dag2[*share - DAG_SIZE / 2].uint4s[thread_id] : *share>=DAG_SIZE / 4 ? g_dag1[*share - DAG_SIZE / 4].uint4s[thread_id]:g_dag[*share].uint4s[thread_id]);
|
||||
}
|
||||
} while ((a += 4) != (ACCESSES & isolate));
|
||||
|
||||
return fnv_reduce(mix);
|
||||
}
|
||||
|
||||
|
||||
|
||||
uint inner_loop(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, uint isolate)
|
||||
{
|
||||
uint4 mix = init;
|
||||
|
||||
// share init0
|
||||
if (thread_id == 0)
|
||||
*share = mix.x;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
uint init0 = *share;
|
||||
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
bool update_share = thread_id == (a/4) % THREADS_PER_HASH;
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != 4; ++i)
|
||||
{
|
||||
if (update_share)
|
||||
{
|
||||
uint m[4] = { mix.x, mix.y, mix.z, mix.w };
|
||||
*share = fnv(init0 ^ (a+i), m[i]) % DAG_SIZE;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
mix = fnv4(mix, g_dag[*share].uint4s[thread_id]);
|
||||
}
|
||||
}
|
||||
while ((a += 4) != (ACCESSES & isolate));
|
||||
|
||||
return fnv_reduce(mix);
|
||||
}
|
||||
|
||||
|
||||
hash32_t final_hash(hash64_t const* init, hash32_t const* mix, uint isolate)
|
||||
{
|
||||
ulong state[25];
|
||||
|
||||
hash32_t hash;
|
||||
uint const hash_size = countof(hash.ulongs);
|
||||
uint const init_size = countof(init->ulongs);
|
||||
uint const mix_size = countof(mix->ulongs);
|
||||
|
||||
// keccak_256(keccak_512(header..nonce) .. mix);
|
||||
copy(state, init->ulongs, init_size);
|
||||
copy(state + init_size, mix->ulongs, mix_size);
|
||||
keccak_f1600_no_absorb(state, init_size+mix_size, hash_size, isolate);
|
||||
|
||||
// copy out
|
||||
copy(hash.ulongs, state, hash_size);
|
||||
return hash;
|
||||
}
|
||||
|
||||
hash32_t compute_hash_simple(
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
hash128_t mix;
|
||||
for (uint i = 0; i != countof(mix.uint4s); ++i)
|
||||
{
|
||||
mix.uint4s[i] = init.uint4s[i % countof(init.uint4s)];
|
||||
}
|
||||
|
||||
uint mix_val = mix.uints[0];
|
||||
uint init0 = mix.uints[0];
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
uint pi = fnv(init0 ^ a, mix_val) % DAG_SIZE;
|
||||
uint n = (a+1) % countof(mix.uints);
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != countof(mix.uints); ++i)
|
||||
{
|
||||
mix.uints[i] = fnv(mix.uints[i], g_dag[pi].uints[i]);
|
||||
mix_val = i == n ? mix.uints[i] : mix_val;
|
||||
}
|
||||
}
|
||||
while (++a != (ACCESSES & isolate));
|
||||
|
||||
// reduce to output
|
||||
hash32_t fnv_mix;
|
||||
for (uint i = 0; i != countof(fnv_mix.uints); ++i)
|
||||
{
|
||||
fnv_mix.uints[i] = fnv_reduce(mix.uint4s[i]);
|
||||
}
|
||||
|
||||
return final_hash(&init, &fnv_mix, isolate);
|
||||
}
|
||||
|
||||
typedef union
|
||||
{
|
||||
struct
|
||||
{
|
||||
hash64_t init;
|
||||
uint pad; // avoid lds bank conflicts
|
||||
};
|
||||
hash32_t mix;
|
||||
} compute_hash_share;
|
||||
|
||||
|
||||
hash32_t compute_hash(
|
||||
__local compute_hash_share* share,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
|
||||
// Compute one init hash per work item.
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
// Threads work together in this phase in groups of 8.
|
||||
uint const thread_id = gid % THREADS_PER_HASH;
|
||||
uint const hash_id = (gid % GROUP_SIZE) / THREADS_PER_HASH;
|
||||
|
||||
hash32_t mix;
|
||||
uint i = 0;
|
||||
do
|
||||
{
|
||||
// share init with other threads
|
||||
if (i == thread_id)
|
||||
share[hash_id].init = init;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint4 thread_init = share[hash_id].init.uint4s[thread_id % (64 / sizeof(uint4))];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint thread_mix = inner_loop(thread_init, thread_id, share[hash_id].mix.uints, g_dag, isolate);
|
||||
|
||||
share[hash_id].mix.uints[thread_id] = thread_mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
if (i == thread_id)
|
||||
mix = share[hash_id].mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
while (++i != (THREADS_PER_HASH & isolate));
|
||||
|
||||
return final_hash(&init, &mix, isolate);
|
||||
}
|
||||
|
||||
|
||||
hash32_t compute_hash_chunks(
|
||||
__local compute_hash_share* share,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
|
||||
// Compute one init hash per work item.
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
// Threads work together in this phase in groups of 8.
|
||||
uint const thread_id = gid % THREADS_PER_HASH;
|
||||
uint const hash_id = (gid % GROUP_SIZE) / THREADS_PER_HASH;
|
||||
|
||||
hash32_t mix;
|
||||
uint i = 0;
|
||||
do
|
||||
{
|
||||
// share init with other threads
|
||||
if (i == thread_id)
|
||||
share[hash_id].init = init;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint4 thread_init = share[hash_id].init.uint4s[thread_id % (64 / sizeof(uint4))];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint thread_mix = inner_loop_chunks(thread_init, thread_id, share[hash_id].mix.uints, g_dag, g_dag1, g_dag2, g_dag3, isolate);
|
||||
|
||||
share[hash_id].mix.uints[thread_id] = thread_mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
if (i == thread_id)
|
||||
mix = share[hash_id].mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
while (++i != (THREADS_PER_HASH & isolate));
|
||||
|
||||
return final_hash(&init, &mix, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash_simple(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search_simple(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
if (hash.ulongs[countof(hash.ulongs)-1] < target)
|
||||
{
|
||||
uint slot = min(convert_uint(MAX_OUTPUTS), convert_uint(atomic_inc(&g_output[0]) + 1));
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min((uint)MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1);
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash_chunks(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash_chunks(share, g_header, g_dag, g_dag1, g_dag2, g_dag3,start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search_chunks(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash_chunks(share, g_header, g_dag, g_dag1, g_dag2, g_dag3, start_nonce + gid, isolate);
|
||||
|
||||
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min(convert_uint(MAX_OUTPUTS), convert_uint(atomic_inc(&g_output[0]) + 1));
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
`
|
|
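The kernel above mixes 32-bit DAG words with a small non-standard FNV-1 variant: multiply by 0x01000193, then XOR, with no offset basis. For quick reference, the same mixing step written out in Go (a standalone illustration, not code from this change):

package main

import "fmt"

// fnvPrime matches FNV_PRIME in the kernel above.
const fnvPrime = 0x01000193

// fnv is the ethash mixing primitive: multiply-then-XOR on 32-bit words,
// wrapping on overflow, without the usual FNV offset basis.
func fnv(x, y uint32) uint32 {
	return x*fnvPrime ^ y
}

func main() {
	fmt.Printf("%#08x\n", fnv(0x12345678, 0x9abcdef0))
}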
@ -1,51 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethash

/*
-mno-stack-arg-probe disables stack probing which avoids the function
__chkstk_ms being linked. this avoids a clash of this symbol as we also
separately link the secp256k1 lib which ends up defining this symbol

1. https://gcc.gnu.org/onlinedocs/gccint/Stack-Checking.html
2. https://groups.google.com/forum/#!msg/golang-dev/v1bziURSQ4k/88fXuJ24e-gJ
3. https://groups.google.com/forum/#!topic/golang-nuts/VNP6Mwz_B6o

*/

/*
#cgo CFLAGS: -std=gnu99 -Wall
#cgo windows CFLAGS: -mno-stack-arg-probe
#cgo LDFLAGS: -lm

#include "src/libethash/internal.c"
#include "src/libethash/sha3.c"
#include "src/libethash/io.c"

#ifdef _WIN32
# include "src/libethash/io_win32.c"
# include "src/libethash/mmap_win32.c"
#else
# include "src/libethash/io_posix.c"
#endif

// 'gateway function' for calling back into go.
extern int ethashGoCallback(unsigned);
int ethashGoCallback_cgo(unsigned percent) { return ethashGoCallback(percent); }

*/
import "C"
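For anyone unfamiliar with the cgo preamble being deleted above: the comment immediately before import "C" is compiled as C, and the #cgo lines feed flags to the C toolchain. A trimmed, self-contained illustration of the mechanics (using libm's sqrt rather than the libethash sources):

package main

/*
#cgo CFLAGS: -std=gnu99 -Wall
#cgo LDFLAGS: -lm

#include <math.h>
*/
import "C"

import "fmt"

func main() {
	// C symbols declared in the preamble are reachable through the C pseudo-package.
	fmt.Println(C.sqrt(2.0))
}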
@ -1,47 +0,0 @@
#!/usr/bin/env python
import os
from distutils.core import setup, Extension
sources = [
    'src/python/core.c',
    'src/libethash/io.c',
    'src/libethash/internal.c',
    'src/libethash/sha3.c']
if os.name == 'nt':
    sources += [
        'src/libethash/util_win32.c',
        'src/libethash/io_win32.c',
        'src/libethash/mmap_win32.c',
    ]
else:
    sources += [
        'src/libethash/io_posix.c'
    ]
depends = [
    'src/libethash/ethash.h',
    'src/libethash/compiler.h',
    'src/libethash/data_sizes.h',
    'src/libethash/endian.h',
    'src/libethash/ethash.h',
    'src/libethash/io.h',
    'src/libethash/fnv.h',
    'src/libethash/internal.h',
    'src/libethash/sha3.h',
    'src/libethash/util.h',
]
pyethash = Extension('pyethash',
                     sources=sources,
                     depends=depends,
                     extra_compile_args=["-Isrc/", "-std=gnu99", "-Wall"])

setup(
    name='pyethash',
    author="Matthew Wampler-Doty",
    author_email="matthew.wampler.doty@gmail.com",
    license='GPL',
    version='0.1.23',
    url='https://github.com/ethereum/ethash',
    download_url='https://github.com/ethereum/ethash/tarball/v23',
    description=('Python wrappers for ethash, the ethereum proof of work'
                 'hashing function'),
    ext_modules=[pyethash],
)
@ -1,58 +0,0 @@
include_directories(..)

set(CMAKE_BUILD_TYPE Release)

if (MSVC)
	add_definitions("/openmp")
endif()

# enable C++11, should probably be a bit more specific about compiler
if (NOT MSVC)
	SET(CMAKE_CXX_FLAGS "-std=c++11")
endif()

if (NOT MPI_FOUND)
	find_package(MPI)
endif()

if (NOT CRYPTOPP_FOUND)
	find_package(CryptoPP 5.6.2)
endif()

if (CRYPTOPP_FOUND)
	add_definitions(-DWITH_CRYPTOPP)
	find_package (Threads REQUIRED)
endif()

if (NOT OpenCL_FOUND)
	find_package(OpenCL)
endif()
if (OpenCL_FOUND)
	add_definitions(-DWITH_OPENCL)
	include_directories(${OpenCL_INCLUDE_DIRS})
	list(APPEND FILES ethash_cl_miner.cpp ethash_cl_miner.h)
endif()

if (MPI_FOUND)
	include_directories(${MPI_INCLUDE_PATH})
	add_executable (Benchmark_MPI_FULL benchmark.cpp)
	target_link_libraries (Benchmark_MPI_FULL ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
	SET_TARGET_PROPERTIES(Benchmark_MPI_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DFULL -DMPI")

	add_executable (Benchmark_MPI_LIGHT benchmark.cpp)
	target_link_libraries (Benchmark_MPI_LIGHT ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
	SET_TARGET_PROPERTIES(Benchmark_MPI_LIGHT PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DMPI")
endif()

add_executable (Benchmark_FULL benchmark.cpp)
target_link_libraries (Benchmark_FULL ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT})
SET_TARGET_PROPERTIES(Benchmark_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DFULL")

add_executable (Benchmark_LIGHT benchmark.cpp)
target_link_libraries (Benchmark_LIGHT ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT})

if (OpenCL_FOUND)
	add_executable (Benchmark_CL benchmark.cpp)
	target_link_libraries (Benchmark_CL ${ETHHASH_LIBS} ethash-cl ${CMAKE_THREAD_LIBS_INIT})
	SET_TARGET_PROPERTIES(Benchmark_CL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DOPENCL")
endif()
@ -1,278 +0,0 @@
|
|||
/*
|
||||
This file is part of cpp-ethereum.
|
||||
|
||||
cpp-ethereum is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
cpp-ethereum is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file benchmark.cpp
|
||||
* @author Tim Hughes <tim@twistedfury.com>
|
||||
* @date 2015
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <chrono>
|
||||
#include <libethash/ethash.h>
|
||||
#include <libethash/util.h>
|
||||
#ifdef OPENCL
|
||||
#include <libethash-cl/ethash_cl_miner.h>
|
||||
#endif
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
|
||||
#ifdef WITH_CRYPTOPP
|
||||
#include <libethash/sha3_cryptopp.h>
|
||||
#include <string>
|
||||
|
||||
#else
|
||||
#include "libethash/sha3.h"
|
||||
#endif // WITH_CRYPTOPP
|
||||
|
||||
#undef min
|
||||
#undef max
|
||||
|
||||
using std::chrono::high_resolution_clock;
|
||||
|
||||
#if defined(OPENCL)
|
||||
const unsigned trials = 1024*1024*32;
|
||||
#elif defined(FULL)
|
||||
const unsigned trials = 1024*1024/8;
|
||||
#else
|
||||
const unsigned trials = 1024*1024/1024;
|
||||
#endif
|
||||
uint8_t g_hashes[1024*32];
|
||||
|
||||
static char nibbleToChar(unsigned nibble)
|
||||
{
|
||||
return (char) ((nibble >= 10 ? 'a'-10 : '0') + nibble);
|
||||
}
|
||||
|
||||
static uint8_t charToNibble(char chr)
|
||||
{
|
||||
if (chr >= '0' && chr <= '9')
|
||||
{
|
||||
return (uint8_t) (chr - '0');
|
||||
}
|
||||
if (chr >= 'a' && chr <= 'z')
|
||||
{
|
||||
return (uint8_t) (chr - 'a' + 10);
|
||||
}
|
||||
if (chr >= 'A' && chr <= 'Z')
|
||||
{
|
||||
return (uint8_t) (chr - 'A' + 10);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static std::vector<uint8_t> hexStringToBytes(char const* str)
|
||||
{
|
||||
std::vector<uint8_t> bytes(strlen(str) >> 1);
|
||||
for (unsigned i = 0; i != bytes.size(); ++i)
|
||||
{
|
||||
bytes[i] = charToNibble(str[i*2 | 0]) << 4;
|
||||
bytes[i] |= charToNibble(str[i*2 | 1]);
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
static std::string bytesToHexString(uint8_t const* bytes, unsigned size)
|
||||
{
|
||||
std::string str;
|
||||
for (unsigned i = 0; i != size; ++i)
|
||||
{
|
||||
str += nibbleToChar(bytes[i] >> 4);
|
||||
str += nibbleToChar(bytes[i] & 0xf);
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
static std::string bytesToHexString(ethash_h256_t const *hash, unsigned size)
|
||||
{
|
||||
return bytesToHexString((uint8_t*)hash, size);
|
||||
}
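The three hex helpers above exist because the benchmark predates any shared utility code; in Go the same round-trip is covered by the standard library's encoding/hex (shown here purely as a point of comparison, decoding the seed hash the benchmark hard-codes further down):

package main

import (
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// Decode the benchmark's seed hash, then re-encode it.
	seed, err := hex.DecodeString("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(seed), "bytes")
	fmt.Println(hex.EncodeToString(seed))
}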
|
||||
|
||||
extern "C" int main(void)
|
||||
{
|
||||
// params for ethash
|
||||
ethash_params params;
|
||||
ethash_params_init(¶ms, 0);
|
||||
//params.full_size = 262147 * 4096; // 1GBish;
|
||||
//params.full_size = 32771 * 4096; // 128MBish;
|
||||
//params.full_size = 8209 * 4096; // 8MBish;
|
||||
//params.cache_size = 8209*4096;
|
||||
//params.cache_size = 2053*4096;
|
||||
ethash_h256_t seed;
|
||||
ethash_h256_t previous_hash;
|
||||
|
||||
memcpy(&seed, hexStringToBytes("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466").data(), 32);
|
||||
memcpy(&previous_hash, hexStringToBytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").data(), 32);
|
||||
|
||||
// allocate page aligned buffer for dataset
|
||||
#ifdef FULL
|
||||
void* full_mem_buf = malloc(params.full_size + 4095);
|
||||
void* full_mem = (void*)((uintptr_t(full_mem_buf) + 4095) & ~4095);
|
||||
#endif
|
||||
void* cache_mem_buf = malloc(params.cache_size + 63);
|
||||
void* cache_mem = (void*)((uintptr_t(cache_mem_buf) + 63) & ~63);
|
||||
|
||||
ethash_cache cache;
|
||||
cache.mem = cache_mem;
|
||||
|
||||
// compute cache or full data
|
||||
{
|
||||
auto startTime = high_resolution_clock::now();
|
||||
ethash_mkcache(&cache, ¶ms, &seed);
|
||||
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
|
||||
ethash_h256_t cache_hash;
|
||||
SHA3_256(&cache_hash, (uint8_t const*)cache_mem, params.cache_size);
|
||||
debugf("ethash_mkcache: %ums, sha3: %s\n", (unsigned)((time*1000)/CLOCKS_PER_SEC), bytesToHexString(&cache_hash, sizeof(cache_hash)).data());
|
||||
|
||||
// print a couple of test hashes
|
||||
{
|
||||
auto startTime = high_resolution_clock::now();
|
||||
ethash_return_value hash;
|
||||
ethash_light(&hash, &cache, ¶ms, &previous_hash, 0);
|
||||
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("ethash_light test: %ums, %s\n", (unsigned)time, bytesToHexString(&hash.result, 32).data());
|
||||
}
|
||||
|
||||
#ifdef FULL
|
||||
startTime = high_resolution_clock::now();
|
||||
ethash_compute_full_data(full_mem, ¶ms, &cache);
|
||||
time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("ethash_compute_full_data: %ums\n", (unsigned)time);
|
||||
#endif // FULL
|
||||
}
|
||||
|
||||
#ifdef OPENCL
|
||||
ethash_cl_miner miner;
|
||||
{
|
||||
auto startTime = high_resolution_clock::now();
|
||||
if (!miner.init(params, &seed))
|
||||
exit(-1);
|
||||
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("ethash_cl_miner init: %ums\n", (unsigned)time);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef FULL
|
||||
{
|
||||
auto startTime = high_resolution_clock::now();
|
||||
ethash_return_value hash;
|
||||
ethash_full(&hash, full_mem, ¶ms, &previous_hash, 0);
|
||||
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("ethash_full test: %uns\n", (unsigned)time);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef OPENCL
|
||||
// validate 1024 hashes against CPU
|
||||
miner.hash(g_hashes, (uint8_t*)&previous_hash, 0, 1024);
|
||||
for (unsigned i = 0; i != 1024; ++i)
|
||||
{
|
||||
ethash_return_value hash;
|
||||
ethash_light(&hash, &cache, ¶ms, &previous_hash, i);
|
||||
if (memcmp(&hash.result, g_hashes + 32*i, 32) != 0)
|
||||
{
|
||||
debugf("nonce %u failed: %s %s\n", i, bytesToHexString(g_hashes + 32*i, 32).c_str(), bytesToHexString(&hash.result, 32).c_str());
|
||||
static unsigned c = 0;
|
||||
if (++c == 16)
|
||||
{
|
||||
exit(-1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ensure nothing else is going on
|
||||
miner.finish();
|
||||
#endif
|
||||
|
||||
auto startTime = high_resolution_clock::now();
|
||||
unsigned hash_count = trials;
|
||||
|
||||
#ifdef OPENCL
|
||||
{
|
||||
struct search_hook : ethash_cl_miner::search_hook
|
||||
{
|
||||
unsigned hash_count;
|
||||
std::vector<uint64_t> nonce_vec;
|
||||
|
||||
virtual bool found(uint64_t const* nonces, uint32_t count)
|
||||
{
|
||||
nonce_vec.insert(nonce_vec.end(), nonces, nonces + count);
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual bool searched(uint64_t start_nonce, uint32_t count)
|
||||
{
|
||||
// do nothing
|
||||
hash_count += count;
|
||||
return hash_count >= trials;
|
||||
}
|
||||
};
|
||||
search_hook hook;
|
||||
hook.hash_count = 0;
|
||||
|
||||
miner.search((uint8_t*)&previous_hash, 0x000000ffffffffff, hook);
|
||||
|
||||
for (unsigned i = 0; i != hook.nonce_vec.size(); ++i)
|
||||
{
|
||||
uint64_t nonce = hook.nonce_vec[i];
|
||||
ethash_return_value hash;
|
||||
ethash_light(&hash, &cache, ¶ms, &previous_hash, nonce);
|
||||
debugf("found: %.8x%.8x -> %s\n", unsigned(nonce>>32), unsigned(nonce), bytesToHexString(&hash.result, 32).c_str());
|
||||
}
|
||||
|
||||
hash_count = hook.hash_count;
|
||||
}
|
||||
#else
|
||||
{
|
||||
//#pragma omp parallel for
|
||||
for (int nonce = 0; nonce < trials; ++nonce)
|
||||
{
|
||||
ethash_return_value hash;
|
||||
#ifdef FULL
|
||||
ethash_full(&hash, full_mem, ¶ms, &previous_hash, nonce);
|
||||
#else
|
||||
ethash_light(&hash, &cache, ¶ms, &previous_hash, nonce);
|
||||
#endif // FULL
|
||||
}
|
||||
}
|
||||
#endif
|
||||
auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("Search took: %ums\n", (unsigned)time/1000);
|
||||
|
||||
unsigned read_size = ETHASH_ACCESSES * ETHASH_MIX_BYTES;
|
||||
#if defined(OPENCL) || defined(FULL)
|
||||
debugf(
|
||||
"hashrate: %8.2f Mh/s, bw: %8.2f GB/s\n",
|
||||
(double)hash_count * (1000*1000)/time / (1000*1000),
|
||||
(double)hash_count*read_size * (1000*1000)/time / (1024*1024*1024)
|
||||
);
|
||||
#else
|
||||
debugf(
|
||||
"hashrate: %8.2f Kh/s, bw: %8.2f MB/s\n",
|
||||
(double)hash_count * (1000*1000)/time / (1000),
|
||||
(double)hash_count*read_size * (1000*1000)/time / (1024*1024)
|
||||
);
|
||||
#endif
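The report above divides the hash count by the elapsed time and, for bandwidth, multiplies by the bytes each hash reads (ETHASH_ACCESSES * ETHASH_MIX_BYTES, i.e. 64 * 128 under the ethash spec). A hedged Go rendering of the same arithmetic, with illustrative inputs:

package main

import (
	"fmt"
	"time"
)

// readBytesPerHash assumes the ethash constants used above:
// 64 DAG accesses of a 128-byte mix per hash.
const readBytesPerHash = 64 * 128

// report prints hashrate and memory bandwidth the same way the benchmark does.
func report(hashes uint64, elapsed time.Duration) {
	secs := elapsed.Seconds()
	fmt.Printf("hashrate: %8.2f Mh/s, bw: %8.2f GB/s\n",
		float64(hashes)/secs/1e6,
		float64(hashes)*readBytesPerHash/secs/(1<<30))
}

func main() {
	report(32<<20, 2*time.Second) // e.g. 32 Mi hashes in two seconds
}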
|
||||
|
||||
free(cache_mem_buf);
|
||||
#ifdef FULL
|
||||
free(full_mem_buf);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -1,44 +0,0 @@
set(LIBRARY ethash)

if (CPPETHEREUM)
	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
endif ()

set(CMAKE_BUILD_TYPE Release)

if (NOT MSVC)
	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99")
endif()

set(FILES util.h
	io.c
	internal.c
	ethash.h
	endian.h
	compiler.h
	fnv.h
	data_sizes.h)

if (MSVC)
	list(APPEND FILES util_win32.c io_win32.c mmap_win32.c)
else()
	list(APPEND FILES io_posix.c)
endif()

if (NOT CRYPTOPP_FOUND)
	find_package(CryptoPP 5.6.2)
endif()

if (CRYPTOPP_FOUND)
	add_definitions(-DWITH_CRYPTOPP)
	include_directories( ${CRYPTOPP_INCLUDE_DIRS} )
	list(APPEND FILES sha3_cryptopp.cpp sha3_cryptopp.h)
else()
	list(APPEND FILES sha3.c sha3.h)
endif()

add_library(${LIBRARY} ${FILES})

if (CRYPTOPP_FOUND)
	TARGET_LINK_LIBRARIES(${LIBRARY} ${CRYPTOPP_LIBRARIES})
endif()
@ -1,33 +0,0 @@
/*
This file is part of cpp-ethereum.

cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file compiler.h
 * @date 2014
 */
#pragma once

// Visual Studio doesn't support the inline keyword in C mode
#if defined(_MSC_VER) && !defined(__cplusplus)
#define inline __inline
#endif

// pretend restrict is a standard keyword
#if defined(_MSC_VER)
#define restrict __restrict
#else
#define restrict __restrict__
#endif
@ -1,812 +0,0 @@
/*
This file is part of cpp-ethereum.

cpp-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

cpp-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/

/** @file data_sizes.h
 * @author Matthew Wampler-Doty <negacthulhu@gmail.com>
 * @date 2015
 */

#pragma once

#include <stdint.h>
#include "compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

// 2048 Epochs (~20 years) worth of tabulated DAG sizes

// Generated with the following Mathematica Code:

// GetCacheSizes[n_] := Module[{
//   CacheSizeBytesInit = 2^24,
//   CacheGrowth = 2^17,
//   HashBytes = 64,
//   j = 0},
//  Reap[
//    While[j < n,
//     Module[{i =
//        Floor[(CacheSizeBytesInit + CacheGrowth * j) / HashBytes]},
//      While[! PrimeQ[i], i--];
//      Sow[i*HashBytes]; j++]]]][[2]][[1]]

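The tables that follow were precomputed with the rule sketched in the Mathematica comment above: take a linearly growing byte budget, divide by the row size, and step the row count down to the nearest prime. A minimal Go rendering of that rule, using the published ethash constants (2^30 init / 2^23 growth / 128-byte rows for the DAG table, 2^24 / 2^17 / 64 for the cache table); illustrative only, not code from this change:

package main

import (
	"fmt"
	"math/big"
)

// tabulatedSize returns rowBytes times the largest prime row count whose
// total size does not exceed initBytes + growthBytes*epoch.
func tabulatedSize(initBytes, growthBytes, rowBytes, epoch uint64) uint64 {
	rows := (initBytes + growthBytes*epoch) / rowBytes
	for !big.NewInt(int64(rows)).ProbablyPrime(20) {
		rows--
	}
	return rows * rowBytes
}

func main() {
	fmt.Println(tabulatedSize(1<<30, 1<<23, 128, 0)) // 1073739904, the first dag_sizes entry
	fmt.Println(tabulatedSize(1<<24, 1<<17, 64, 0))  // 16776896, the first cache_sizes entry
}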
static const uint64_t dag_sizes[2048] = {
|
||||
1073739904U, 1082130304U, 1090514816U, 1098906752U, 1107293056U,
|
||||
1115684224U, 1124070016U, 1132461952U, 1140849536U, 1149232768U,
|
||||
1157627776U, 1166013824U, 1174404736U, 1182786944U, 1191180416U,
|
||||
1199568512U, 1207958912U, 1216345216U, 1224732032U, 1233124736U,
|
||||
1241513344U, 1249902464U, 1258290304U, 1266673792U, 1275067264U,
|
||||
1283453312U, 1291844992U, 1300234112U, 1308619904U, 1317010048U,
|
||||
1325397376U, 1333787776U, 1342176128U, 1350561664U, 1358954368U,
|
||||
1367339392U, 1375731584U, 1384118144U, 1392507008U, 1400897408U,
|
||||
1409284736U, 1417673344U, 1426062464U, 1434451072U, 1442839168U,
|
||||
1451229056U, 1459615616U, 1468006016U, 1476394112U, 1484782976U,
|
||||
1493171584U, 1501559168U, 1509948032U, 1518337664U, 1526726528U,
|
||||
1535114624U, 1543503488U, 1551892096U, 1560278656U, 1568669056U,
|
||||
1577056384U, 1585446272U, 1593831296U, 1602219392U, 1610610304U,
|
||||
1619000192U, 1627386752U, 1635773824U, 1644164224U, 1652555648U,
|
||||
1660943488U, 1669332608U, 1677721216U, 1686109312U, 1694497664U,
|
||||
1702886272U, 1711274624U, 1719661184U, 1728047744U, 1736434816U,
|
||||
1744829056U, 1753218944U, 1761606272U, 1769995904U, 1778382464U,
|
||||
1786772864U, 1795157888U, 1803550592U, 1811937664U, 1820327552U,
|
||||
1828711552U, 1837102976U, 1845488768U, 1853879936U, 1862269312U,
|
||||
1870656896U, 1879048064U, 1887431552U, 1895825024U, 1904212096U,
|
||||
1912601216U, 1920988544U, 1929379456U, 1937765504U, 1946156672U,
|
||||
1954543232U, 1962932096U, 1971321728U, 1979707264U, 1988093056U,
|
||||
1996487552U, 2004874624U, 2013262208U, 2021653888U, 2030039936U,
|
||||
2038430848U, 2046819968U, 2055208576U, 2063596672U, 2071981952U,
|
||||
2080373632U, 2088762752U, 2097149056U, 2105539712U, 2113928576U,
|
||||
2122315136U, 2130700672U, 2139092608U, 2147483264U, 2155872128U,
|
||||
2164257664U, 2172642176U, 2181035392U, 2189426048U, 2197814912U,
|
||||
2206203008U, 2214587264U, 2222979712U, 2231367808U, 2239758208U,
|
||||
2248145024U, 2256527744U, 2264922752U, 2273312128U, 2281701248U,
|
||||
2290086272U, 2298476672U, 2306867072U, 2315251072U, 2323639168U,
|
||||
2332032128U, 2340420224U, 2348808064U, 2357196416U, 2365580416U,
|
||||
2373966976U, 2382363008U, 2390748544U, 2399139968U, 2407530368U,
|
||||
2415918976U, 2424307328U, 2432695424U, 2441084288U, 2449472384U,
|
||||
2457861248U, 2466247808U, 2474637184U, 2483026816U, 2491414144U,
|
||||
2499803776U, 2508191872U, 2516582272U, 2524970368U, 2533359232U,
|
||||
2541743488U, 2550134144U, 2558525056U, 2566913408U, 2575301504U,
|
||||
2583686528U, 2592073856U, 2600467328U, 2608856192U, 2617240448U,
|
||||
2625631616U, 2634022016U, 2642407552U, 2650796416U, 2659188352U,
|
||||
2667574912U, 2675965312U, 2684352896U, 2692738688U, 2701130624U,
|
||||
2709518464U, 2717907328U, 2726293376U, 2734685056U, 2743073152U,
|
||||
2751462016U, 2759851648U, 2768232832U, 2776625536U, 2785017728U,
|
||||
2793401984U, 2801794432U, 2810182016U, 2818571648U, 2826959488U,
|
||||
2835349376U, 2843734144U, 2852121472U, 2860514432U, 2868900992U,
|
||||
2877286784U, 2885676928U, 2894069632U, 2902451584U, 2910843008U,
|
||||
2919234688U, 2927622784U, 2936011648U, 2944400768U, 2952789376U,
|
||||
2961177728U, 2969565568U, 2977951616U, 2986338944U, 2994731392U,
|
||||
3003120256U, 3011508352U, 3019895936U, 3028287104U, 3036675968U,
|
||||
3045063808U, 3053452928U, 3061837696U, 3070228352U, 3078615424U,
|
||||
3087003776U, 3095394944U, 3103782272U, 3112173184U, 3120562048U,
|
||||
3128944768U, 3137339264U, 3145725056U, 3154109312U, 3162505088U,
|
||||
3170893184U, 3179280256U, 3187669376U, 3196056704U, 3204445568U,
|
||||
3212836736U, 3221224064U, 3229612928U, 3238002304U, 3246391168U,
|
||||
3254778496U, 3263165824U, 3271556224U, 3279944576U, 3288332416U,
|
||||
3296719232U, 3305110912U, 3313500032U, 3321887104U, 3330273152U,
|
||||
3338658944U, 3347053184U, 3355440512U, 3363827072U, 3372220288U,
|
||||
3380608384U, 3388997504U, 3397384576U, 3405774208U, 3414163072U,
|
||||
3422551936U, 3430937984U, 3439328384U, 3447714176U, 3456104576U,
|
||||
3464493952U, 3472883584U, 3481268864U, 3489655168U, 3498048896U,
|
||||
3506434432U, 3514826368U, 3523213952U, 3531603584U, 3539987072U,
|
||||
3548380288U, 3556763264U, 3565157248U, 3573545344U, 3581934464U,
|
||||
3590324096U, 3598712704U, 3607098752U, 3615488384U, 3623877248U,
|
||||
3632265856U, 3640646528U, 3649043584U, 3657430144U, 3665821568U,
|
||||
3674207872U, 3682597504U, 3690984832U, 3699367808U, 3707764352U,
|
||||
3716152448U, 3724541056U, 3732925568U, 3741318016U, 3749706368U,
|
||||
3758091136U, 3766481536U, 3774872704U, 3783260032U, 3791650432U,
|
||||
3800036224U, 3808427648U, 3816815488U, 3825204608U, 3833592704U,
|
||||
3841981568U, 3850370432U, 3858755968U, 3867147904U, 3875536256U,
|
||||
3883920512U, 3892313728U, 3900702592U, 3909087872U, 3917478784U,
|
||||
3925868416U, 3934256512U, 3942645376U, 3951032192U, 3959422336U,
|
||||
3967809152U, 3976200064U, 3984588416U, 3992974976U, 4001363584U,
|
||||
4009751168U, 4018141312U, 4026530432U, 4034911616U, 4043308928U,
|
||||
4051695488U, 4060084352U, 4068472448U, 4076862848U, 4085249408U,
|
||||
4093640576U, 4102028416U, 4110413696U, 4118805632U, 4127194496U,
|
||||
4135583104U, 4143971968U, 4152360832U, 4160746112U, 4169135744U,
|
||||
4177525888U, 4185912704U, 4194303616U, 4202691968U, 4211076736U,
|
||||
4219463552U, 4227855488U, 4236246656U, 4244633728U, 4253022848U,
|
||||
4261412224U, 4269799808U, 4278184832U, 4286578048U, 4294962304U,
|
||||
4303349632U, 4311743104U, 4320130432U, 4328521088U, 4336909184U,
|
||||
4345295488U, 4353687424U, 4362073472U, 4370458496U, 4378852736U,
|
||||
4387238528U, 4395630208U, 4404019072U, 4412407424U, 4420790656U,
|
||||
4429182848U, 4437571456U, 4445962112U, 4454344064U, 4462738048U,
|
||||
4471119232U, 4479516544U, 4487904128U, 4496289664U, 4504682368U,
|
||||
4513068416U, 4521459584U, 4529846144U, 4538232704U, 4546619776U,
|
||||
4555010176U, 4563402112U, 4571790208U, 4580174464U, 4588567936U,
|
||||
4596957056U, 4605344896U, 4613734016U, 4622119808U, 4630511488U,
|
||||
4638898816U, 4647287936U, 4655675264U, 4664065664U, 4672451968U,
|
||||
4680842624U, 4689231488U, 4697620352U, 4706007424U, 4714397056U,
|
||||
4722786176U, 4731173248U, 4739562368U, 4747951744U, 4756340608U,
|
||||
4764727936U, 4773114496U, 4781504384U, 4789894784U, 4798283648U,
|
||||
4806667648U, 4815059584U, 4823449472U, 4831835776U, 4840226176U,
|
||||
4848612224U, 4857003392U, 4865391488U, 4873780096U, 4882169728U,
|
||||
4890557312U, 4898946944U, 4907333248U, 4915722368U, 4924110976U,
|
||||
4932499328U, 4940889728U, 4949276032U, 4957666432U, 4966054784U,
|
||||
4974438016U, 4982831488U, 4991221376U, 4999607168U, 5007998848U,
|
||||
5016386432U, 5024763776U, 5033164672U, 5041544576U, 5049941888U,
|
||||
5058329728U, 5066717056U, 5075107456U, 5083494272U, 5091883904U,
|
||||
5100273536U, 5108662144U, 5117048192U, 5125436032U, 5133827456U,
|
||||
5142215296U, 5150605184U, 5158993024U, 5167382144U, 5175769472U,
|
||||
5184157568U, 5192543872U, 5200936064U, 5209324928U, 5217711232U,
|
||||
5226102656U, 5234490496U, 5242877312U, 5251263872U, 5259654016U,
|
||||
5268040832U, 5276434304U, 5284819328U, 5293209728U, 5301598592U,
|
||||
5309986688U, 5318374784U, 5326764416U, 5335151488U, 5343542144U,
|
||||
5351929472U, 5360319872U, 5368706944U, 5377096576U, 5385484928U,
|
||||
5393871232U, 5402263424U, 5410650496U, 5419040384U, 5427426944U,
|
||||
5435816576U, 5444205952U, 5452594816U, 5460981376U, 5469367936U,
|
||||
5477760896U, 5486148736U, 5494536832U, 5502925952U, 5511315328U,
|
||||
5519703424U, 5528089984U, 5536481152U, 5544869504U, 5553256064U,
|
||||
5561645696U, 5570032768U, 5578423936U, 5586811264U, 5595193216U,
|
||||
5603585408U, 5611972736U, 5620366208U, 5628750464U, 5637143936U,
|
||||
5645528192U, 5653921408U, 5662310272U, 5670694784U, 5679082624U,
|
||||
5687474048U, 5695864448U, 5704251008U, 5712641408U, 5721030272U,
|
||||
5729416832U, 5737806208U, 5746194304U, 5754583936U, 5762969984U,
|
||||
5771358592U, 5779748224U, 5788137856U, 5796527488U, 5804911232U,
|
||||
5813300608U, 5821692544U, 5830082176U, 5838468992U, 5846855552U,
|
||||
5855247488U, 5863636096U, 5872024448U, 5880411008U, 5888799872U,
|
||||
5897186432U, 5905576832U, 5913966976U, 5922352768U, 5930744704U,
|
||||
5939132288U, 5947522432U, 5955911296U, 5964299392U, 5972688256U,
|
||||
5981074304U, 5989465472U, 5997851008U, 6006241408U, 6014627968U,
|
||||
6023015552U, 6031408256U, 6039796096U, 6048185216U, 6056574848U,
|
||||
6064963456U, 6073351808U, 6081736064U, 6090128768U, 6098517632U,
|
||||
6106906496U, 6115289216U, 6123680896U, 6132070016U, 6140459648U,
|
||||
6148849024U, 6157237376U, 6165624704U, 6174009728U, 6182403712U,
|
||||
6190792064U, 6199176064U, 6207569792U, 6215952256U, 6224345216U,
|
||||
6232732544U, 6241124224U, 6249510272U, 6257899136U, 6266287744U,
|
||||
6274676864U, 6283065728U, 6291454336U, 6299843456U, 6308232064U,
|
||||
6316620928U, 6325006208U, 6333395584U, 6341784704U, 6350174848U,
|
||||
6358562176U, 6366951296U, 6375337856U, 6383729536U, 6392119168U,
|
||||
6400504192U, 6408895616U, 6417283456U, 6425673344U, 6434059136U,
|
||||
6442444672U, 6450837376U, 6459223424U, 6467613056U, 6476004224U,
|
||||
6484393088U, 6492781952U, 6501170048U, 6509555072U, 6517947008U,
|
||||
6526336384U, 6534725504U, 6543112832U, 6551500672U, 6559888768U,
|
||||
6568278656U, 6576662912U, 6585055616U, 6593443456U, 6601834112U,
|
||||
6610219648U, 6618610304U, 6626999168U, 6635385472U, 6643777408U,
|
||||
6652164224U, 6660552832U, 6668941952U, 6677330048U, 6685719424U,
|
||||
6694107776U, 6702493568U, 6710882176U, 6719274112U, 6727662976U,
|
||||
6736052096U, 6744437632U, 6752825984U, 6761213824U, 6769604224U,
|
||||
6777993856U, 6786383488U, 6794770816U, 6803158144U, 6811549312U,
|
||||
6819937664U, 6828326528U, 6836706176U, 6845101696U, 6853491328U,
|
||||
6861880448U, 6870269312U, 6878655104U, 6887046272U, 6895433344U,
|
||||
6903822208U, 6912212864U, 6920596864U, 6928988288U, 6937377152U,
|
||||
6945764992U, 6954149248U, 6962544256U, 6970928768U, 6979317376U,
|
||||
6987709312U, 6996093824U, 7004487296U, 7012875392U, 7021258624U,
|
||||
7029652352U, 7038038912U, 7046427776U, 7054818944U, 7063207808U,
|
||||
7071595136U, 7079980928U, 7088372608U, 7096759424U, 7105149824U,
|
||||
7113536896U, 7121928064U, 7130315392U, 7138699648U, 7147092352U,
|
||||
7155479168U, 7163865728U, 7172249984U, 7180648064U, 7189036672U,
|
||||
7197424768U, 7205810816U, 7214196608U, 7222589824U, 7230975104U,
|
||||
7239367552U, 7247755904U, 7256145536U, 7264533376U, 7272921472U,
|
||||
7281308032U, 7289694848U, 7298088832U, 7306471808U, 7314864512U,
|
||||
7323253888U, 7331643008U, 7340029568U, 7348419712U, 7356808832U,
|
||||
7365196672U, 7373585792U, 7381973888U, 7390362752U, 7398750592U,
|
||||
7407138944U, 7415528576U, 7423915648U, 7432302208U, 7440690304U,
|
||||
7449080192U, 7457472128U, 7465860992U, 7474249088U, 7482635648U,
|
||||
7491023744U, 7499412608U, 7507803008U, 7516192384U, 7524579968U,
|
||||
7532967296U, 7541358464U, 7549745792U, 7558134656U, 7566524032U,
|
||||
7574912896U, 7583300992U, 7591690112U, 7600075136U, 7608466816U,
|
||||
7616854912U, 7625244544U, 7633629824U, 7642020992U, 7650410368U,
|
||||
7658794112U, 7667187328U, 7675574912U, 7683961984U, 7692349568U,
|
||||
7700739712U, 7709130368U, 7717519232U, 7725905536U, 7734295424U,
|
||||
7742683264U, 7751069056U, 7759457408U, 7767849088U, 7776238208U,
|
||||
7784626816U, 7793014912U, 7801405312U, 7809792128U, 7818179968U,
|
||||
7826571136U, 7834957184U, 7843347328U, 7851732352U, 7860124544U,
|
||||
7868512384U, 7876902016U, 7885287808U, 7893679744U, 7902067072U,
|
||||
7910455936U, 7918844288U, 7927230848U, 7935622784U, 7944009344U,
|
||||
7952400256U, 7960786048U, 7969176704U, 7977565312U, 7985953408U,
|
||||
7994339968U, 8002730368U, 8011119488U, 8019508096U, 8027896192U,
|
||||
8036285056U, 8044674688U, 8053062272U, 8061448832U, 8069838464U,
|
||||
8078227328U, 8086616704U, 8095006592U, 8103393664U, 8111783552U,
|
||||
8120171392U, 8128560256U, 8136949376U, 8145336704U, 8153726848U,
|
||||
8162114944U, 8170503296U, 8178891904U, 8187280768U, 8195669632U,
|
||||
8204058496U, 8212444544U, 8220834176U, 8229222272U, 8237612672U,
|
||||
8246000768U, 8254389376U, 8262775168U, 8271167104U, 8279553664U,
|
||||
8287944064U, 8296333184U, 8304715136U, 8313108352U, 8321497984U,
|
||||
8329885568U, 8338274432U, 8346663296U, 8355052928U, 8363441536U,
|
||||
8371828352U, 8380217984U, 8388606592U, 8396996224U, 8405384576U,
|
||||
8413772672U, 8422161536U, 8430549376U, 8438939008U, 8447326592U,
|
||||
8455715456U, 8464104832U, 8472492928U, 8480882048U, 8489270656U,
|
||||
8497659776U, 8506045312U, 8514434944U, 8522823808U, 8531208832U,
|
||||
8539602304U, 8547990656U, 8556378752U, 8564768384U, 8573154176U,
|
||||
8581542784U, 8589933952U, 8598322816U, 8606705024U, 8615099264U,
|
||||
8623487872U, 8631876992U, 8640264064U, 8648653952U, 8657040256U,
|
||||
8665430656U, 8673820544U, 8682209152U, 8690592128U, 8698977152U,
|
||||
8707374464U, 8715763328U, 8724151424U, 8732540032U, 8740928384U,
|
||||
8749315712U, 8757704576U, 8766089344U, 8774480768U, 8782871936U,
|
||||
8791260032U, 8799645824U, 8808034432U, 8816426368U, 8824812928U,
|
||||
8833199488U, 8841591424U, 8849976448U, 8858366336U, 8866757248U,
|
||||
8875147136U, 8883532928U, 8891923328U, 8900306816U, 8908700288U,
|
||||
8917088384U, 8925478784U, 8933867392U, 8942250368U, 8950644608U,
|
||||
8959032704U, 8967420544U, 8975809664U, 8984197504U, 8992584064U,
|
||||
9000976256U, 9009362048U, 9017752448U, 9026141312U, 9034530688U,
|
||||
9042917504U, 9051307904U, 9059694208U, 9068084864U, 9076471424U,
|
||||
9084861824U, 9093250688U, 9101638528U, 9110027648U, 9118416512U,
|
||||
9126803584U, 9135188096U, 9143581312U, 9151969664U, 9160356224U,
|
||||
9168747136U, 9177134464U, 9185525632U, 9193910144U, 9202302848U,
|
||||
9210690688U, 9219079552U, 9227465344U, 9235854464U, 9244244864U,
|
||||
9252633472U, 9261021824U, 9269411456U, 9277799296U, 9286188928U,
|
||||
9294574208U, 9302965888U, 9311351936U, 9319740032U, 9328131968U,
|
||||
9336516736U, 9344907392U, 9353296768U, 9361685888U, 9370074752U,
|
||||
9378463616U, 9386849408U, 9395239808U, 9403629184U, 9412016512U,
|
||||
9420405376U, 9428795008U, 9437181568U, 9445570688U, 9453960832U,
|
||||
9462346624U, 9470738048U, 9479121536U, 9487515008U, 9495903616U,
|
||||
9504289664U, 9512678528U, 9521067904U, 9529456256U, 9537843584U,
|
||||
9546233728U, 9554621312U, 9563011456U, 9571398784U, 9579788672U,
|
||||
9588178304U, 9596567168U, 9604954496U, 9613343104U, 9621732992U,
|
||||
9630121856U, 9638508416U, 9646898816U, 9655283584U, 9663675776U,
|
||||
9672061312U, 9680449664U, 9688840064U, 9697230464U, 9705617536U,
|
||||
9714003584U, 9722393984U, 9730772608U, 9739172224U, 9747561088U,
|
||||
9755945344U, 9764338816U, 9772726144U, 9781116544U, 9789503872U,
|
||||
9797892992U, 9806282624U, 9814670464U, 9823056512U, 9831439232U,
|
||||
9839833984U, 9848224384U, 9856613504U, 9865000576U, 9873391232U,
|
||||
9881772416U, 9890162816U, 9898556288U, 9906940544U, 9915333248U,
|
||||
9923721088U, 9932108672U, 9940496512U, 9948888448U, 9957276544U,
|
||||
9965666176U, 9974048384U, 9982441088U, 9990830464U, 9999219584U,
|
||||
10007602816U, 10015996544U, 10024385152U, 10032774016U, 10041163648U,
|
||||
10049548928U, 10057940096U, 10066329472U, 10074717824U, 10083105152U,
|
||||
10091495296U, 10099878784U, 10108272256U, 10116660608U, 10125049216U,
|
||||
10133437312U, 10141825664U, 10150213504U, 10158601088U, 10166991232U,
|
||||
10175378816U, 10183766144U, 10192157312U, 10200545408U, 10208935552U,
|
||||
10217322112U, 10225712768U, 10234099328U, 10242489472U, 10250876032U,
|
||||
10259264896U, 10267656064U, 10276042624U, 10284429184U, 10292820352U,
|
||||
10301209472U, 10309598848U, 10317987712U, 10326375296U, 10334763392U,
|
||||
10343153536U, 10351541632U, 10359930752U, 10368318592U, 10376707456U,
|
||||
10385096576U, 10393484672U, 10401867136U, 10410262144U, 10418647424U,
|
||||
10427039104U, 10435425664U, 10443810176U, 10452203648U, 10460589952U,
|
||||
10468982144U, 10477369472U, 10485759104U, 10494147712U, 10502533504U,
|
||||
10510923392U, 10519313536U, 10527702656U, 10536091264U, 10544478592U,
|
||||
10552867712U, 10561255808U, 10569642368U, 10578032768U, 10586423168U,
|
||||
10594805632U, 10603200128U, 10611588992U, 10619976064U, 10628361344U,
|
||||
10636754048U, 10645143424U, 10653531776U, 10661920384U, 10670307968U,
|
||||
10678696832U, 10687086464U, 10695475072U, 10703863168U, 10712246144U,
|
||||
10720639616U, 10729026688U, 10737414784U, 10745806208U, 10754190976U,
|
||||
10762581376U, 10770971264U, 10779356288U, 10787747456U, 10796135552U,
|
||||
10804525184U, 10812915584U, 10821301888U, 10829692288U, 10838078336U,
|
||||
10846469248U, 10854858368U, 10863247232U, 10871631488U, 10880023424U,
|
||||
10888412032U, 10896799616U, 10905188992U, 10913574016U, 10921964672U,
|
||||
10930352768U, 10938742912U, 10947132544U, 10955518592U, 10963909504U,
|
||||
10972298368U, 10980687488U, 10989074816U, 10997462912U, 11005851776U,
|
||||
11014241152U, 11022627712U, 11031017344U, 11039403904U, 11047793024U,
|
||||
11056184704U, 11064570752U, 11072960896U, 11081343872U, 11089737856U,
|
||||
11098128256U, 11106514816U, 11114904448U, 11123293568U, 11131680128U,
|
||||
11140065152U, 11148458368U, 11156845696U, 11165236864U, 11173624192U,
|
||||
11182013824U, 11190402688U, 11198790784U, 11207179136U, 11215568768U,
|
||||
11223957376U, 11232345728U, 11240734592U, 11249122688U, 11257511296U,
|
||||
11265899648U, 11274285952U, 11282675584U, 11291065472U, 11299452544U,
|
||||
11307842432U, 11316231296U, 11324616832U, 11333009024U, 11341395584U,
|
||||
11349782656U, 11358172288U, 11366560384U, 11374950016U, 11383339648U,
|
||||
11391721856U, 11400117376U, 11408504192U, 11416893568U, 11425283456U,
|
||||
11433671552U, 11442061184U, 11450444672U, 11458837888U, 11467226752U,
|
||||
11475611776U, 11484003968U, 11492392064U, 11500780672U, 11509169024U,
|
||||
11517550976U, 11525944448U, 11534335616U, 11542724224U, 11551111808U,
|
||||
11559500672U, 11567890304U, 11576277376U, 11584667008U, 11593056128U,
|
||||
11601443456U, 11609830016U, 11618221952U, 11626607488U, 11634995072U,
|
||||
11643387776U, 11651775104U, 11660161664U, 11668552576U, 11676940928U,
|
||||
11685330304U, 11693718656U, 11702106496U, 11710496128U, 11718882688U,
|
||||
11727273088U, 11735660416U, 11744050048U, 11752437376U, 11760824704U,
|
||||
11769216128U, 11777604736U, 11785991296U, 11794381952U, 11802770048U,
|
||||
11811157888U, 11819548544U, 11827932544U, 11836324736U, 11844713344U,
|
||||
11853100928U, 11861486464U, 11869879936U, 11878268032U, 11886656896U,
|
||||
11895044992U, 11903433088U, 11911822976U, 11920210816U, 11928600448U,
|
||||
11936987264U, 11945375872U, 11953761152U, 11962151296U, 11970543488U,
|
||||
11978928512U, 11987320448U, 11995708288U, 12004095104U, 12012486272U,
|
||||
12020875136U, 12029255552U, 12037652096U, 12046039168U, 12054429568U,
|
||||
12062813824U, 12071206528U, 12079594624U, 12087983744U, 12096371072U,
|
||||
12104759936U, 12113147264U, 12121534592U, 12129924992U, 12138314624U,
|
||||
12146703232U, 12155091584U, 12163481216U, 12171864704U, 12180255872U,
|
||||
12188643968U, 12197034112U, 12205424512U, 12213811328U, 12222199424U,
|
||||
12230590336U, 12238977664U, 12247365248U, 12255755392U, 12264143488U,
|
||||
12272531584U, 12280920448U, 12289309568U, 12297694592U, 12306086528U,
|
||||
12314475392U, 12322865024U, 12331253632U, 12339640448U, 12348029312U,
|
||||
12356418944U, 12364805248U, 12373196672U, 12381580928U, 12389969024U,
|
||||
12398357632U, 12406750592U, 12415138432U, 12423527552U, 12431916416U,
|
||||
12440304512U, 12448692352U, 12457081216U, 12465467776U, 12473859968U,
|
||||
12482245504U, 12490636672U, 12499025536U, 12507411584U, 12515801728U,
|
||||
12524190592U, 12532577152U, 12540966272U, 12549354368U, 12557743232U,
|
||||
12566129536U, 12574523264U, 12582911872U, 12591299456U, 12599688064U,
|
||||
12608074624U, 12616463488U, 12624845696U, 12633239936U, 12641631616U,
|
||||
12650019968U, 12658407296U, 12666795136U, 12675183232U, 12683574656U,
|
||||
12691960192U, 12700350592U, 12708740224U, 12717128576U, 12725515904U,
|
||||
12733906816U, 12742295168U, 12750680192U, 12759071872U, 12767460736U,
|
||||
12775848832U, 12784236928U, 12792626816U, 12801014656U, 12809404288U,
|
||||
12817789312U, 12826181504U, 12834568832U, 12842954624U, 12851345792U,
|
||||
12859732352U, 12868122496U, 12876512128U, 12884901248U, 12893289088U,
|
||||
12901672832U, 12910067584U, 12918455168U, 12926842496U, 12935232896U,
|
||||
12943620736U, 12952009856U, 12960396928U, 12968786816U, 12977176192U,
|
||||
12985563776U, 12993951104U, 13002341504U, 13010730368U, 13019115392U,
|
||||
13027506304U, 13035895168U, 13044272512U, 13052673152U, 13061062528U,
|
||||
13069446272U, 13077838976U, 13086227072U, 13094613632U, 13103000192U,
|
||||
13111393664U, 13119782528U, 13128157568U, 13136559232U, 13144945024U,
|
||||
13153329536U, 13161724288U, 13170111872U, 13178502784U, 13186884736U,
|
||||
13195279744U, 13203667072U, 13212057472U, 13220445824U, 13228832128U,
|
||||
13237221248U, 13245610624U, 13254000512U, 13262388352U, 13270777472U,
|
||||
13279166336U, 13287553408U, 13295943296U, 13304331904U, 13312719488U,
|
||||
13321108096U, 13329494656U, 13337885824U, 13346274944U, 13354663808U,
|
||||
13363051136U, 13371439232U, 13379825024U, 13388210816U, 13396605056U,
|
||||
13404995456U, 13413380224U, 13421771392U, 13430159744U, 13438546048U,
|
||||
13446937216U, 13455326848U, 13463708288U, 13472103808U, 13480492672U,
|
||||
13488875648U, 13497269888U, 13505657728U, 13514045312U, 13522435712U,
|
||||
13530824576U, 13539210112U, 13547599232U, 13555989376U, 13564379008U,
|
||||
13572766336U, 13581154432U, 13589544832U, 13597932928U, 13606320512U,
|
||||
13614710656U, 13623097472U, 13631477632U, 13639874944U, 13648264064U,
|
||||
13656652928U, 13665041792U, 13673430656U, 13681818496U, 13690207616U,
|
||||
13698595712U, 13706982272U, 13715373184U, 13723762048U, 13732150144U,
|
||||
13740536704U, 13748926592U, 13757316224U, 13765700992U, 13774090112U,
|
||||
13782477952U, 13790869376U, 13799259008U, 13807647872U, 13816036736U,
|
||||
13824425344U, 13832814208U, 13841202304U, 13849591424U, 13857978752U,
|
||||
13866368896U, 13874754688U, 13883145344U, 13891533184U, 13899919232U,
|
||||
13908311168U, 13916692096U, 13925085056U, 13933473152U, 13941866368U,
|
||||
13950253696U, 13958643584U, 13967032192U, 13975417216U, 13983807616U,
|
||||
13992197504U, 14000582272U, 14008973696U, 14017363072U, 14025752192U,
|
||||
14034137984U, 14042528384U, 14050918016U, 14059301504U, 14067691648U,
|
||||
14076083584U, 14084470144U, 14092852352U, 14101249664U, 14109635968U,
|
||||
14118024832U, 14126407552U, 14134804352U, 14143188608U, 14151577984U,
|
||||
14159968384U, 14168357248U, 14176741504U, 14185127296U, 14193521024U,
|
||||
14201911424U, 14210301824U, 14218685056U, 14227067264U, 14235467392U,
|
||||
14243855488U, 14252243072U, 14260630144U, 14269021568U, 14277409408U,
|
||||
14285799296U, 14294187904U, 14302571392U, 14310961792U, 14319353728U,
|
||||
14327738752U, 14336130944U, 14344518784U, 14352906368U, 14361296512U,
|
||||
14369685376U, 14378071424U, 14386462592U, 14394848128U, 14403230848U,
|
||||
14411627392U, 14420013952U, 14428402304U, 14436793472U, 14445181568U,
|
||||
14453569664U, 14461959808U, 14470347904U, 14478737024U, 14487122816U,
|
||||
14495511424U, 14503901824U, 14512291712U, 14520677504U, 14529064832U,
|
||||
14537456768U, 14545845632U, 14554234496U, 14562618496U, 14571011456U,
|
||||
14579398784U, 14587789184U, 14596172672U, 14604564608U, 14612953984U,
|
||||
14621341312U, 14629724288U, 14638120832U, 14646503296U, 14654897536U,
|
||||
14663284864U, 14671675264U, 14680061056U, 14688447616U, 14696835968U,
|
||||
14705228416U, 14713616768U, 14722003328U, 14730392192U, 14738784128U,
|
||||
14747172736U, 14755561088U, 14763947648U, 14772336512U, 14780725376U,
|
||||
14789110144U, 14797499776U, 14805892736U, 14814276992U, 14822670208U,
|
||||
14831056256U, 14839444352U, 14847836032U, 14856222848U, 14864612992U,
|
||||
14872997504U, 14881388672U, 14889775744U, 14898165376U, 14906553472U,
|
||||
14914944896U, 14923329664U, 14931721856U, 14940109696U, 14948497024U,
|
||||
14956887424U, 14965276544U, 14973663616U, 14982053248U, 14990439808U,
|
||||
14998830976U, 15007216768U, 15015605888U, 15023995264U, 15032385152U,
|
||||
15040768384U, 15049154944U, 15057549184U, 15065939072U, 15074328448U,
|
||||
15082715008U, 15091104128U, 15099493504U, 15107879296U, 15116269184U,
|
||||
15124659584U, 15133042304U, 15141431936U, 15149824384U, 15158214272U,
|
||||
15166602368U, 15174991232U, 15183378304U, 15191760512U, 15200154496U,
|
||||
15208542592U, 15216931712U, 15225323392U, 15233708416U, 15242098048U,
|
||||
15250489216U, 15258875264U, 15267265408U, 15275654528U, 15284043136U,
|
||||
15292431488U, 15300819584U, 15309208192U, 15317596544U, 15325986176U,
|
||||
15334374784U, 15342763648U, 15351151744U, 15359540608U, 15367929728U,
|
||||
15376318336U, 15384706432U, 15393092992U, 15401481856U, 15409869952U,
|
||||
15418258816U, 15426649984U, 15435037568U, 15443425664U, 15451815296U,
|
||||
15460203392U, 15468589184U, 15476979328U, 15485369216U, 15493755776U,
|
||||
15502146944U, 15510534272U, 15518924416U, 15527311232U, 15535699072U,
|
||||
15544089472U, 15552478336U, 15560866688U, 15569254528U, 15577642624U,
|
||||
15586031488U, 15594419072U, 15602809472U, 15611199104U, 15619586432U,
|
||||
15627975296U, 15636364928U, 15644753792U, 15653141888U, 15661529216U,
|
||||
15669918848U, 15678305152U, 15686696576U, 15695083136U, 15703474048U,
|
||||
15711861632U, 15720251264U, 15728636288U, 15737027456U, 15745417088U,
|
||||
15753804928U, 15762194048U, 15770582656U, 15778971008U, 15787358336U,
|
||||
15795747712U, 15804132224U, 15812523392U, 15820909696U, 15829300096U,
|
||||
15837691264U, 15846071936U, 15854466944U, 15862855808U, 15871244672U,
|
||||
15879634816U, 15888020608U, 15896409728U, 15904799104U, 15913185152U,
|
||||
15921577088U, 15929966464U, 15938354816U, 15946743424U, 15955129472U,
|
||||
15963519872U, 15971907968U, 15980296064U, 15988684928U, 15997073024U,
|
||||
16005460864U, 16013851264U, 16022241152U, 16030629248U, 16039012736U,
|
||||
16047406976U, 16055794816U, 16064181376U, 16072571264U, 16080957824U,
|
||||
16089346688U, 16097737856U, 16106125184U, 16114514816U, 16122904192U,
|
||||
16131292544U, 16139678848U, 16148066944U, 16156453504U, 16164839552U,
|
||||
16173236096U, 16181623424U, 16190012032U, 16198401152U, 16206790528U,
|
||||
16215177344U, 16223567744U, 16231956352U, 16240344704U, 16248731008U,
|
||||
16257117824U, 16265504384U, 16273898624U, 16282281856U, 16290668672U,
|
||||
16299064192U, 16307449216U, 16315842176U, 16324230016U, 16332613504U,
|
||||
16341006464U, 16349394304U, 16357783168U, 16366172288U, 16374561664U,
|
||||
16382951296U, 16391337856U, 16399726208U, 16408116352U, 16416505472U,
|
||||
16424892032U, 16433282176U, 16441668224U, 16450058624U, 16458448768U,
|
||||
16466836864U, 16475224448U, 16483613056U, 16492001408U, 16500391808U,
|
||||
16508779648U, 16517166976U, 16525555328U, 16533944192U, 16542330752U,
|
||||
16550719616U, 16559110528U, 16567497088U, 16575888512U, 16584274816U,
|
||||
16592665472U, 16601051008U, 16609442944U, 16617832064U, 16626218624U,
|
||||
16634607488U, 16642996096U, 16651385728U, 16659773824U, 16668163712U,
|
||||
16676552576U, 16684938112U, 16693328768U, 16701718144U, 16710095488U,
|
||||
16718492288U, 16726883968U, 16735272832U, 16743661184U, 16752049792U,
|
||||
16760436608U, 16768827008U, 16777214336U, 16785599104U, 16793992832U,
|
||||
16802381696U, 16810768768U, 16819151744U, 16827542656U, 16835934848U,
|
||||
16844323712U, 16852711552U, 16861101952U, 16869489536U, 16877876864U,
|
||||
16886265728U, 16894653056U, 16903044736U, 16911431296U, 16919821696U,
|
||||
16928207488U, 16936592768U, 16944987776U, 16953375616U, 16961763968U,
|
||||
16970152832U, 16978540928U, 16986929536U, 16995319168U, 17003704448U,
|
||||
17012096896U, 17020481152U, 17028870784U, 17037262208U, 17045649536U,
|
||||
17054039936U, 17062426496U, 17070814336U, 17079205504U, 17087592064U,
|
||||
17095978112U, 17104369024U, 17112759424U, 17121147776U, 17129536384U,
|
||||
17137926016U, 17146314368U, 17154700928U, 17163089792U, 17171480192U,
|
||||
17179864192U, 17188256896U, 17196644992U, 17205033856U, 17213423488U,
|
||||
17221811072U, 17230198912U, 17238588032U, 17246976896U, 17255360384U,
|
||||
17263754624U, 17272143232U, 17280530048U, 17288918912U, 17297309312U,
|
||||
17305696384U, 17314085504U, 17322475136U, 17330863744U, 17339252096U,
|
||||
17347640192U, 17356026496U, 17364413824U, 17372796544U, 17381190016U,
|
||||
17389583488U, 17397972608U, 17406360704U, 17414748544U, 17423135872U,
|
||||
17431527296U, 17439915904U, 17448303232U, 17456691584U, 17465081728U,
|
||||
17473468288U, 17481857408U, 17490247552U, 17498635904U, 17507022464U,
|
||||
17515409024U, 17523801728U, 17532189824U, 17540577664U, 17548966016U,
|
||||
17557353344U, 17565741184U, 17574131584U, 17582519168U, 17590907008U,
|
||||
17599296128U, 17607687808U, 17616076672U, 17624455808U, 17632852352U,
|
||||
17641238656U, 17649630848U, 17658018944U, 17666403968U, 17674794112U,
|
||||
17683178368U, 17691573376U, 17699962496U, 17708350592U, 17716739968U,
|
||||
17725126528U, 17733517184U, 17741898112U, 17750293888U, 17758673024U,
|
||||
17767070336U, 17775458432U, 17783848832U, 17792236928U, 17800625536U,
|
||||
17809012352U, 17817402752U, 17825785984U, 17834178944U, 17842563968U,
|
||||
17850955648U, 17859344512U, 17867732864U, 17876119424U, 17884511872U,
|
||||
17892900224U, 17901287296U, 17909677696U, 17918058112U, 17926451072U,
|
||||
17934843776U, 17943230848U, 17951609216U, 17960008576U, 17968397696U,
|
||||
17976784256U, 17985175424U, 17993564032U, 18001952128U, 18010339712U,
|
||||
18018728576U, 18027116672U, 18035503232U, 18043894144U, 18052283264U,
|
||||
18060672128U, 18069056384U, 18077449856U, 18085837184U, 18094225792U,
|
||||
18102613376U, 18111004544U, 18119388544U, 18127781248U, 18136170368U,
|
||||
18144558976U, 18152947328U, 18161336192U, 18169724288U, 18178108544U,
|
||||
18186498944U, 18194886784U, 18203275648U, 18211666048U, 18220048768U,
|
||||
18228444544U, 18236833408U, 18245220736U
|
||||
};
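These values are consumed one per epoch: ethash_get_datasize() further down in internal.c divides the block number by ETHASH_EPOCH_LENGTH and indexes the table. A minimal sketch of that lookup with an explicit bounds check (the function name is illustrative and assumes ETHASH_EPOCH_LENGTH from ethash.h is visible; it is not part of the original file):

// Sketch only: epoch-indexed lookup into the dag_sizes table above.
static uint64_t example_dag_size(uint64_t block_number)
{
	uint64_t const epoch = block_number / ETHASH_EPOCH_LENGTH;
	return epoch < 2048 ? dag_sizes[epoch] : 0; // the table only covers the first 2048 epochs
}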
|
||||
|
||||
|
||||
// Generated with the following Mathematica Code:
|
||||
|
||||
// GetCacheSizes[n_] := Module[{
|
||||
// DataSetSizeBytesInit = 2^30,
|
||||
// MixBytes = 128,
|
||||
// DataSetGrowth = 2^23,
|
||||
// HashBytes = 64,
|
||||
// CacheMultiplier = 1024,
|
||||
// j = 0},
|
||||
// Reap[
|
||||
// While[j < n,
|
||||
// Module[{i = Floor[(DataSetSizeBytesInit + DataSetGrowth * j) / (CacheMultiplier * HashBytes)]},
|
||||
// While[! PrimeQ[i], i--];
|
||||
// Sow[i*HashBytes]; j++]]]][[2]][[1]]
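For readers without Mathematica, the same rule in C: each entry is HashBytes times the largest prime p with p <= (2^30 + 2^23 * epoch) / (1024 * 64). The trial-division primality check below is an illustrative assumption, not code from this repository:

// Illustrative recomputation of one cache_sizes entry (see the generator above).
static int example_is_prime(uint64_t n)
{
	if (n < 2) return 0;
	for (uint64_t d = 2; d * d <= n; ++d) {
		if (n % d == 0) return 0;
	}
	return 1;
}

static uint64_t example_cache_size(uint64_t epoch)
{
	uint64_t const hash_bytes = 64;         // HashBytes
	uint64_t const cache_multiplier = 1024; // CacheMultiplier
	uint64_t p = ((1ULL << 30) + (1ULL << 23) * epoch) / (cache_multiplier * hash_bytes);
	while (!example_is_prime(p)) {
		--p;
	}
	return p * hash_bytes; // equals cache_sizes[epoch] below
}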
|
||||
|
||||
const uint64_t cache_sizes[2048] = {
|
||||
16776896U, 16907456U, 17039296U, 17170112U, 17301056U, 17432512U, 17563072U,
|
||||
17693888U, 17824192U, 17955904U, 18087488U, 18218176U, 18349504U, 18481088U,
|
||||
18611392U, 18742336U, 18874304U, 19004224U, 19135936U, 19267264U, 19398208U,
|
||||
19529408U, 19660096U, 19791424U, 19922752U, 20053952U, 20184896U, 20315968U,
|
||||
20446912U, 20576576U, 20709184U, 20840384U, 20971072U, 21102272U, 21233216U,
|
||||
21364544U, 21494848U, 21626816U, 21757376U, 21887552U, 22019392U, 22151104U,
|
||||
22281536U, 22412224U, 22543936U, 22675264U, 22806464U, 22935872U, 23068096U,
|
||||
23198272U, 23330752U, 23459008U, 23592512U, 23723968U, 23854912U, 23986112U,
|
||||
24116672U, 24247616U, 24378688U, 24509504U, 24640832U, 24772544U, 24903488U,
|
||||
25034432U, 25165376U, 25296704U, 25427392U, 25558592U, 25690048U, 25820096U,
|
||||
25951936U, 26081728U, 26214208U, 26345024U, 26476096U, 26606656U, 26737472U,
|
||||
26869184U, 26998208U, 27131584U, 27262528U, 27393728U, 27523904U, 27655744U,
|
||||
27786688U, 27917888U, 28049344U, 28179904U, 28311488U, 28441792U, 28573504U,
|
||||
28700864U, 28835648U, 28966208U, 29096768U, 29228608U, 29359808U, 29490752U,
|
||||
29621824U, 29752256U, 29882816U, 30014912U, 30144448U, 30273728U, 30406976U,
|
||||
30538432U, 30670784U, 30799936U, 30932672U, 31063744U, 31195072U, 31325248U,
|
||||
31456192U, 31588288U, 31719232U, 31850432U, 31981504U, 32110784U, 32243392U,
|
||||
32372672U, 32505664U, 32636608U, 32767808U, 32897344U, 33029824U, 33160768U,
|
||||
33289664U, 33423296U, 33554368U, 33683648U, 33816512U, 33947456U, 34076992U,
|
||||
34208704U, 34340032U, 34471744U, 34600256U, 34734016U, 34864576U, 34993984U,
|
||||
35127104U, 35258176U, 35386688U, 35518528U, 35650624U, 35782336U, 35910976U,
|
||||
36044608U, 36175808U, 36305728U, 36436672U, 36568384U, 36699968U, 36830656U,
|
||||
36961984U, 37093312U, 37223488U, 37355072U, 37486528U, 37617472U, 37747904U,
|
||||
37879232U, 38009792U, 38141888U, 38272448U, 38403392U, 38535104U, 38660672U,
|
||||
38795584U, 38925632U, 39059264U, 39190336U, 39320768U, 39452096U, 39581632U,
|
||||
39713984U, 39844928U, 39974848U, 40107968U, 40238144U, 40367168U, 40500032U,
|
||||
40631744U, 40762816U, 40894144U, 41023552U, 41155904U, 41286208U, 41418304U,
|
||||
41547712U, 41680448U, 41811904U, 41942848U, 42073792U, 42204992U, 42334912U,
|
||||
42467008U, 42597824U, 42729152U, 42860096U, 42991552U, 43122368U, 43253696U,
|
||||
43382848U, 43515712U, 43646912U, 43777088U, 43907648U, 44039104U, 44170432U,
|
||||
44302144U, 44433344U, 44564288U, 44694976U, 44825152U, 44956864U, 45088448U,
|
||||
45219008U, 45350464U, 45481024U, 45612608U, 45744064U, 45874496U, 46006208U,
|
||||
46136768U, 46267712U, 46399424U, 46529344U, 46660672U, 46791488U, 46923328U,
|
||||
47053504U, 47185856U, 47316928U, 47447872U, 47579072U, 47710144U, 47839936U,
|
||||
47971648U, 48103232U, 48234176U, 48365248U, 48496192U, 48627136U, 48757312U,
|
||||
48889664U, 49020736U, 49149248U, 49283008U, 49413824U, 49545152U, 49675712U,
|
||||
49807168U, 49938368U, 50069056U, 50200256U, 50331584U, 50462656U, 50593472U,
|
||||
50724032U, 50853952U, 50986048U, 51117632U, 51248576U, 51379904U, 51510848U,
|
||||
51641792U, 51773248U, 51903296U, 52035136U, 52164032U, 52297664U, 52427968U,
|
||||
52557376U, 52690112U, 52821952U, 52952896U, 53081536U, 53213504U, 53344576U,
|
||||
53475776U, 53608384U, 53738816U, 53870528U, 54000832U, 54131776U, 54263744U,
|
||||
54394688U, 54525248U, 54655936U, 54787904U, 54918592U, 55049152U, 55181248U,
|
||||
55312064U, 55442752U, 55574336U, 55705024U, 55836224U, 55967168U, 56097856U,
|
||||
56228672U, 56358592U, 56490176U, 56621888U, 56753728U, 56884928U, 57015488U,
|
||||
57146816U, 57278272U, 57409216U, 57540416U, 57671104U, 57802432U, 57933632U,
|
||||
58064576U, 58195264U, 58326976U, 58457408U, 58588864U, 58720192U, 58849984U,
|
||||
58981696U, 59113024U, 59243456U, 59375552U, 59506624U, 59637568U, 59768512U,
|
||||
59897792U, 60030016U, 60161984U, 60293056U, 60423872U, 60554432U, 60683968U,
|
||||
60817216U, 60948032U, 61079488U, 61209664U, 61341376U, 61471936U, 61602752U,
|
||||
61733696U, 61865792U, 61996736U, 62127808U, 62259136U, 62389568U, 62520512U,
|
||||
62651584U, 62781632U, 62910784U, 63045056U, 63176128U, 63307072U, 63438656U,
|
||||
63569216U, 63700928U, 63831616U, 63960896U, 64093888U, 64225088U, 64355392U,
|
||||
64486976U, 64617664U, 64748608U, 64879424U, 65009216U, 65142464U, 65273792U,
|
||||
65402816U, 65535424U, 65666752U, 65797696U, 65927744U, 66060224U, 66191296U,
|
||||
66321344U, 66453056U, 66584384U, 66715328U, 66846656U, 66977728U, 67108672U,
|
||||
67239104U, 67370432U, 67501888U, 67631296U, 67763776U, 67895104U, 68026304U,
|
||||
68157248U, 68287936U, 68419264U, 68548288U, 68681408U, 68811968U, 68942912U,
|
||||
69074624U, 69205568U, 69337024U, 69467584U, 69599168U, 69729472U, 69861184U,
|
||||
69989824U, 70122944U, 70253888U, 70385344U, 70515904U, 70647232U, 70778816U,
|
||||
70907968U, 71040832U, 71171648U, 71303104U, 71432512U, 71564992U, 71695168U,
|
||||
71826368U, 71958464U, 72089536U, 72219712U, 72350144U, 72482624U, 72613568U,
|
||||
72744512U, 72875584U, 73006144U, 73138112U, 73268672U, 73400128U, 73530944U,
|
||||
73662272U, 73793344U, 73924544U, 74055104U, 74185792U, 74316992U, 74448832U,
|
||||
74579392U, 74710976U, 74841664U, 74972864U, 75102784U, 75233344U, 75364544U,
|
||||
75497024U, 75627584U, 75759296U, 75890624U, 76021696U, 76152256U, 76283072U,
|
||||
76414144U, 76545856U, 76676672U, 76806976U, 76937792U, 77070016U, 77200832U,
|
||||
77331392U, 77462464U, 77593664U, 77725376U, 77856448U, 77987776U, 78118336U,
|
||||
78249664U, 78380992U, 78511424U, 78642496U, 78773056U, 78905152U, 79033664U,
|
||||
79166656U, 79297472U, 79429568U, 79560512U, 79690816U, 79822784U, 79953472U,
|
||||
80084672U, 80214208U, 80346944U, 80477632U, 80608576U, 80740288U, 80870848U,
|
||||
81002048U, 81133504U, 81264448U, 81395648U, 81525952U, 81657536U, 81786304U,
|
||||
81919808U, 82050112U, 82181312U, 82311616U, 82443968U, 82573376U, 82705984U,
|
||||
82835776U, 82967744U, 83096768U, 83230528U, 83359552U, 83491264U, 83622464U,
|
||||
83753536U, 83886016U, 84015296U, 84147776U, 84277184U, 84409792U, 84540608U,
|
||||
84672064U, 84803008U, 84934336U, 85065152U, 85193792U, 85326784U, 85458496U,
|
||||
85589312U, 85721024U, 85851968U, 85982656U, 86112448U, 86244416U, 86370112U,
|
||||
86506688U, 86637632U, 86769344U, 86900672U, 87031744U, 87162304U, 87293632U,
|
||||
87424576U, 87555392U, 87687104U, 87816896U, 87947968U, 88079168U, 88211264U,
|
||||
88341824U, 88473152U, 88603712U, 88735424U, 88862912U, 88996672U, 89128384U,
|
||||
89259712U, 89390272U, 89521984U, 89652544U, 89783872U, 89914816U, 90045376U,
|
||||
90177088U, 90307904U, 90438848U, 90569152U, 90700096U, 90832832U, 90963776U,
|
||||
91093696U, 91223744U, 91356992U, 91486784U, 91618496U, 91749824U, 91880384U,
|
||||
92012224U, 92143552U, 92273344U, 92405696U, 92536768U, 92666432U, 92798912U,
|
||||
92926016U, 93060544U, 93192128U, 93322816U, 93453632U, 93583936U, 93715136U,
|
||||
93845056U, 93977792U, 94109504U, 94240448U, 94371776U, 94501184U, 94632896U,
|
||||
94764224U, 94895552U, 95023424U, 95158208U, 95287744U, 95420224U, 95550016U,
|
||||
95681216U, 95811904U, 95943872U, 96075328U, 96203584U, 96337856U, 96468544U,
|
||||
96599744U, 96731072U, 96860992U, 96992576U, 97124288U, 97254848U, 97385536U,
|
||||
97517248U, 97647808U, 97779392U, 97910464U, 98041408U, 98172608U, 98303168U,
|
||||
98434496U, 98565568U, 98696768U, 98827328U, 98958784U, 99089728U, 99220928U,
|
||||
99352384U, 99482816U, 99614272U, 99745472U, 99876416U, 100007104U,
|
||||
100138048U, 100267072U, 100401088U, 100529984U, 100662592U, 100791872U,
|
||||
100925248U, 101056064U, 101187392U, 101317952U, 101449408U, 101580608U,
|
||||
101711296U, 101841728U, 101973824U, 102104896U, 102235712U, 102366016U,
|
||||
102498112U, 102628672U, 102760384U, 102890432U, 103021888U, 103153472U,
|
||||
103284032U, 103415744U, 103545152U, 103677248U, 103808576U, 103939648U,
|
||||
104070976U, 104201792U, 104332736U, 104462528U, 104594752U, 104725952U,
|
||||
104854592U, 104988608U, 105118912U, 105247808U, 105381184U, 105511232U,
|
||||
105643072U, 105774784U, 105903296U, 106037056U, 106167872U, 106298944U,
|
||||
106429504U, 106561472U, 106691392U, 106822592U, 106954304U, 107085376U,
|
||||
107216576U, 107346368U, 107478464U, 107609792U, 107739712U, 107872192U,
|
||||
108003136U, 108131392U, 108265408U, 108396224U, 108527168U, 108657344U,
|
||||
108789568U, 108920384U, 109049792U, 109182272U, 109312576U, 109444928U,
|
||||
109572928U, 109706944U, 109837888U, 109969088U, 110099648U, 110230976U,
|
||||
110362432U, 110492992U, 110624704U, 110755264U, 110886208U, 111017408U,
|
||||
111148864U, 111279296U, 111410752U, 111541952U, 111673024U, 111803456U,
|
||||
111933632U, 112066496U, 112196416U, 112328512U, 112457792U, 112590784U,
|
||||
112715968U, 112852672U, 112983616U, 113114944U, 113244224U, 113376448U,
|
||||
113505472U, 113639104U, 113770304U, 113901376U, 114031552U, 114163264U,
|
||||
114294592U, 114425536U, 114556864U, 114687424U, 114818624U, 114948544U,
|
||||
115080512U, 115212224U, 115343296U, 115473472U, 115605184U, 115736128U,
|
||||
115867072U, 115997248U, 116128576U, 116260288U, 116391488U, 116522944U,
|
||||
116652992U, 116784704U, 116915648U, 117046208U, 117178304U, 117308608U,
|
||||
117440192U, 117569728U, 117701824U, 117833024U, 117964096U, 118094656U,
|
||||
118225984U, 118357312U, 118489024U, 118617536U, 118749632U, 118882112U,
|
||||
119012416U, 119144384U, 119275328U, 119406016U, 119537344U, 119668672U,
|
||||
119798464U, 119928896U, 120061376U, 120192832U, 120321728U, 120454336U,
|
||||
120584512U, 120716608U, 120848192U, 120979136U, 121109056U, 121241408U,
|
||||
121372352U, 121502912U, 121634752U, 121764416U, 121895744U, 122027072U,
|
||||
122157632U, 122289088U, 122421184U, 122550592U, 122682944U, 122813888U,
|
||||
122945344U, 123075776U, 123207488U, 123338048U, 123468736U, 123600704U,
|
||||
123731264U, 123861952U, 123993664U, 124124608U, 124256192U, 124386368U,
|
||||
124518208U, 124649024U, 124778048U, 124911296U, 125041088U, 125173696U,
|
||||
125303744U, 125432896U, 125566912U, 125696576U, 125829056U, 125958592U,
|
||||
126090304U, 126221248U, 126352832U, 126483776U, 126615232U, 126746432U,
|
||||
126876608U, 127008704U, 127139392U, 127270336U, 127401152U, 127532224U,
|
||||
127663552U, 127794752U, 127925696U, 128055232U, 128188096U, 128319424U,
|
||||
128449856U, 128581312U, 128712256U, 128843584U, 128973632U, 129103808U,
|
||||
129236288U, 129365696U, 129498944U, 129629888U, 129760832U, 129892288U,
|
||||
130023104U, 130154048U, 130283968U, 130416448U, 130547008U, 130678336U,
|
||||
130807616U, 130939456U, 131071552U, 131202112U, 131331776U, 131464384U,
|
||||
131594048U, 131727296U, 131858368U, 131987392U, 132120256U, 132250816U,
|
||||
132382528U, 132513728U, 132644672U, 132774976U, 132905792U, 133038016U,
|
||||
133168832U, 133299392U, 133429312U, 133562048U, 133692992U, 133823296U,
|
||||
133954624U, 134086336U, 134217152U, 134348608U, 134479808U, 134607296U,
|
||||
134741056U, 134872384U, 135002944U, 135134144U, 135265472U, 135396544U,
|
||||
135527872U, 135659072U, 135787712U, 135921472U, 136052416U, 136182848U,
|
||||
136313792U, 136444864U, 136576448U, 136707904U, 136837952U, 136970048U,
|
||||
137099584U, 137232064U, 137363392U, 137494208U, 137625536U, 137755712U,
|
||||
137887424U, 138018368U, 138149824U, 138280256U, 138411584U, 138539584U,
|
||||
138672832U, 138804928U, 138936128U, 139066688U, 139196864U, 139328704U,
|
||||
139460032U, 139590208U, 139721024U, 139852864U, 139984576U, 140115776U,
|
||||
140245696U, 140376512U, 140508352U, 140640064U, 140769856U, 140902336U,
|
||||
141032768U, 141162688U, 141294016U, 141426496U, 141556544U, 141687488U,
|
||||
141819584U, 141949888U, 142080448U, 142212544U, 142342336U, 142474432U,
|
||||
142606144U, 142736192U, 142868288U, 142997824U, 143129408U, 143258944U,
|
||||
143392448U, 143523136U, 143653696U, 143785024U, 143916992U, 144045632U,
|
||||
144177856U, 144309184U, 144440768U, 144570688U, 144701888U, 144832448U,
|
||||
144965056U, 145096384U, 145227584U, 145358656U, 145489856U, 145620928U,
|
||||
145751488U, 145883072U, 146011456U, 146144704U, 146275264U, 146407232U,
|
||||
146538176U, 146668736U, 146800448U, 146931392U, 147062336U, 147193664U,
|
||||
147324224U, 147455936U, 147586624U, 147717056U, 147848768U, 147979456U,
|
||||
148110784U, 148242368U, 148373312U, 148503232U, 148635584U, 148766144U,
|
||||
148897088U, 149028416U, 149159488U, 149290688U, 149420224U, 149551552U,
|
||||
149683136U, 149814976U, 149943616U, 150076352U, 150208064U, 150338624U,
|
||||
150470464U, 150600256U, 150732224U, 150862784U, 150993088U, 151125952U,
|
||||
151254976U, 151388096U, 151519168U, 151649728U, 151778752U, 151911104U,
|
||||
152042944U, 152174144U, 152304704U, 152435648U, 152567488U, 152698816U,
|
||||
152828992U, 152960576U, 153091648U, 153222976U, 153353792U, 153484096U,
|
||||
153616192U, 153747008U, 153878336U, 154008256U, 154139968U, 154270912U,
|
||||
154402624U, 154533824U, 154663616U, 154795712U, 154926272U, 155057984U,
|
||||
155188928U, 155319872U, 155450816U, 155580608U, 155712064U, 155843392U,
|
||||
155971136U, 156106688U, 156237376U, 156367424U, 156499264U, 156630976U,
|
||||
156761536U, 156892352U, 157024064U, 157155008U, 157284416U, 157415872U,
|
||||
157545536U, 157677248U, 157810496U, 157938112U, 158071744U, 158203328U,
|
||||
158334656U, 158464832U, 158596288U, 158727616U, 158858048U, 158988992U,
|
||||
159121216U, 159252416U, 159381568U, 159513152U, 159645632U, 159776192U,
|
||||
159906496U, 160038464U, 160169536U, 160300352U, 160430656U, 160563008U,
|
||||
160693952U, 160822208U, 160956352U, 161086784U, 161217344U, 161349184U,
|
||||
161480512U, 161611456U, 161742272U, 161873216U, 162002752U, 162135872U,
|
||||
162266432U, 162397888U, 162529216U, 162660032U, 162790976U, 162922048U,
|
||||
163052096U, 163184576U, 163314752U, 163446592U, 163577408U, 163707968U,
|
||||
163839296U, 163969984U, 164100928U, 164233024U, 164364224U, 164494912U,
|
||||
164625856U, 164756672U, 164887616U, 165019072U, 165150016U, 165280064U,
|
||||
165412672U, 165543104U, 165674944U, 165805888U, 165936832U, 166067648U,
|
||||
166198336U, 166330048U, 166461248U, 166591552U, 166722496U, 166854208U,
|
||||
166985408U, 167116736U, 167246656U, 167378368U, 167508416U, 167641024U,
|
||||
167771584U, 167903168U, 168034112U, 168164032U, 168295744U, 168427456U,
|
||||
168557632U, 168688448U, 168819136U, 168951616U, 169082176U, 169213504U,
|
||||
169344832U, 169475648U, 169605952U, 169738048U, 169866304U, 169999552U,
|
||||
170131264U, 170262464U, 170393536U, 170524352U, 170655424U, 170782016U,
|
||||
170917696U, 171048896U, 171179072U, 171310784U, 171439936U, 171573184U,
|
||||
171702976U, 171835072U, 171966272U, 172097216U, 172228288U, 172359232U,
|
||||
172489664U, 172621376U, 172747712U, 172883264U, 173014208U, 173144512U,
|
||||
173275072U, 173407424U, 173539136U, 173669696U, 173800768U, 173931712U,
|
||||
174063424U, 174193472U, 174325696U, 174455744U, 174586816U, 174718912U,
|
||||
174849728U, 174977728U, 175109696U, 175242688U, 175374272U, 175504832U,
|
||||
175636288U, 175765696U, 175898432U, 176028992U, 176159936U, 176291264U,
|
||||
176422592U, 176552512U, 176684864U, 176815424U, 176946496U, 177076544U,
|
||||
177209152U, 177340096U, 177470528U, 177600704U, 177731648U, 177864256U,
|
||||
177994816U, 178126528U, 178257472U, 178387648U, 178518464U, 178650176U,
|
||||
178781888U, 178912064U, 179044288U, 179174848U, 179305024U, 179436736U,
|
||||
179568448U, 179698496U, 179830208U, 179960512U, 180092608U, 180223808U,
|
||||
180354752U, 180485696U, 180617152U, 180748096U, 180877504U, 181009984U,
|
||||
181139264U, 181272512U, 181402688U, 181532608U, 181663168U, 181795136U,
|
||||
181926592U, 182057536U, 182190016U, 182320192U, 182451904U, 182582336U,
|
||||
182713792U, 182843072U, 182976064U, 183107264U, 183237056U, 183368384U,
|
||||
183494848U, 183631424U, 183762752U, 183893824U, 184024768U, 184154816U,
|
||||
184286656U, 184417984U, 184548928U, 184680128U, 184810816U, 184941248U,
|
||||
185072704U, 185203904U, 185335616U, 185465408U, 185596352U, 185727296U,
|
||||
185859904U, 185989696U, 186121664U, 186252992U, 186383552U, 186514112U,
|
||||
186645952U, 186777152U, 186907328U, 187037504U, 187170112U, 187301824U,
|
||||
187429184U, 187562048U, 187693504U, 187825472U, 187957184U, 188087104U,
|
||||
188218304U, 188349376U, 188481344U, 188609728U, 188743616U, 188874304U,
|
||||
189005248U, 189136448U, 189265088U, 189396544U, 189528128U, 189660992U,
|
||||
189791936U, 189923264U, 190054208U, 190182848U, 190315072U, 190447424U,
|
||||
190577984U, 190709312U, 190840768U, 190971328U, 191102656U, 191233472U,
|
||||
191364032U, 191495872U, 191626816U, 191758016U, 191888192U, 192020288U,
|
||||
192148928U, 192282176U, 192413504U, 192542528U, 192674752U, 192805952U,
|
||||
192937792U, 193068608U, 193198912U, 193330496U, 193462208U, 193592384U,
|
||||
193723456U, 193854272U, 193985984U, 194116672U, 194247232U, 194379712U,
|
||||
194508352U, 194641856U, 194772544U, 194900672U, 195035072U, 195166016U,
|
||||
195296704U, 195428032U, 195558592U, 195690304U, 195818176U, 195952576U,
|
||||
196083392U, 196214336U, 196345792U, 196476736U, 196607552U, 196739008U,
|
||||
196869952U, 197000768U, 197130688U, 197262784U, 197394368U, 197523904U,
|
||||
197656384U, 197787584U, 197916608U, 198049472U, 198180544U, 198310208U,
|
||||
198442432U, 198573632U, 198705088U, 198834368U, 198967232U, 199097792U,
|
||||
199228352U, 199360192U, 199491392U, 199621696U, 199751744U, 199883968U,
|
||||
200014016U, 200146624U, 200276672U, 200408128U, 200540096U, 200671168U,
|
||||
200801984U, 200933312U, 201062464U, 201194944U, 201326144U, 201457472U,
|
||||
201588544U, 201719744U, 201850816U, 201981632U, 202111552U, 202244032U,
|
||||
202374464U, 202505152U, 202636352U, 202767808U, 202898368U, 203030336U,
|
||||
203159872U, 203292608U, 203423296U, 203553472U, 203685824U, 203816896U,
|
||||
203947712U, 204078272U, 204208192U, 204341056U, 204472256U, 204603328U,
|
||||
204733888U, 204864448U, 204996544U, 205125568U, 205258304U, 205388864U,
|
||||
205517632U, 205650112U, 205782208U, 205913536U, 206044736U, 206176192U,
|
||||
206307008U, 206434496U, 206569024U, 206700224U, 206831168U, 206961856U,
|
||||
207093056U, 207223616U, 207355328U, 207486784U, 207616832U, 207749056U,
|
||||
207879104U, 208010048U, 208141888U, 208273216U, 208404032U, 208534336U,
|
||||
208666048U, 208796864U, 208927424U, 209059264U, 209189824U, 209321792U,
|
||||
209451584U, 209582656U, 209715136U, 209845568U, 209976896U, 210106432U,
|
||||
210239296U, 210370112U, 210501568U, 210630976U, 210763712U, 210894272U,
|
||||
211024832U, 211156672U, 211287616U, 211418176U, 211549376U, 211679296U,
|
||||
211812032U, 211942592U, 212074432U, 212204864U, 212334016U, 212467648U,
|
||||
212597824U, 212727616U, 212860352U, 212991424U, 213120832U, 213253952U,
|
||||
213385024U, 213515584U, 213645632U, 213777728U, 213909184U, 214040128U,
|
||||
214170688U, 214302656U, 214433728U, 214564544U, 214695232U, 214826048U,
|
||||
214956992U, 215089088U, 215219776U, 215350592U, 215482304U, 215613248U,
|
||||
215743552U, 215874752U, 216005312U, 216137024U, 216267328U, 216399296U,
|
||||
216530752U, 216661696U, 216790592U, 216923968U, 217054528U, 217183168U,
|
||||
217316672U, 217448128U, 217579072U, 217709504U, 217838912U, 217972672U,
|
||||
218102848U, 218233024U, 218364736U, 218496832U, 218627776U, 218759104U,
|
||||
218888896U, 219021248U, 219151936U, 219281728U, 219413056U, 219545024U,
|
||||
219675968U, 219807296U, 219938624U, 220069312U, 220200128U, 220331456U,
|
||||
220461632U, 220592704U, 220725184U, 220855744U, 220987072U, 221117888U,
|
||||
221249216U, 221378368U, 221510336U, 221642048U, 221772736U, 221904832U,
|
||||
222031808U, 222166976U, 222297536U, 222428992U, 222559936U, 222690368U,
|
||||
222820672U, 222953152U, 223083968U, 223213376U, 223345984U, 223476928U,
|
||||
223608512U, 223738688U, 223869376U, 224001472U, 224132672U, 224262848U,
|
||||
224394944U, 224524864U, 224657344U, 224788288U, 224919488U, 225050432U,
|
||||
225181504U, 225312704U, 225443776U, 225574592U, 225704768U, 225834176U,
|
||||
225966784U, 226097216U, 226229824U, 226360384U, 226491712U, 226623424U,
|
||||
226754368U, 226885312U, 227015104U, 227147456U, 227278528U, 227409472U,
|
||||
227539904U, 227669696U, 227802944U, 227932352U, 228065216U, 228196288U,
|
||||
228326464U, 228457792U, 228588736U, 228720064U, 228850112U, 228981056U,
|
||||
229113152U, 229243328U, 229375936U, 229505344U, 229636928U, 229769152U,
|
||||
229894976U, 230030272U, 230162368U, 230292416U, 230424512U, 230553152U,
|
||||
230684864U, 230816704U, 230948416U, 231079616U, 231210944U, 231342016U,
|
||||
231472448U, 231603776U, 231733952U, 231866176U, 231996736U, 232127296U,
|
||||
232259392U, 232388672U, 232521664U, 232652608U, 232782272U, 232914496U,
|
||||
233043904U, 233175616U, 233306816U, 233438528U, 233569984U, 233699776U,
|
||||
233830592U, 233962688U, 234092224U, 234221888U, 234353984U, 234485312U,
|
||||
234618304U, 234749888U, 234880832U, 235011776U, 235142464U, 235274048U,
|
||||
235403456U, 235535936U, 235667392U, 235797568U, 235928768U, 236057152U,
|
||||
236190272U, 236322752U, 236453312U, 236583616U, 236715712U, 236846528U,
|
||||
236976448U, 237108544U, 237239104U, 237371072U, 237501632U, 237630784U,
|
||||
237764416U, 237895232U, 238026688U, 238157632U, 238286912U, 238419392U,
|
||||
238548032U, 238681024U, 238812608U, 238941632U, 239075008U, 239206336U,
|
||||
239335232U, 239466944U, 239599168U, 239730496U, 239861312U, 239992384U,
|
||||
240122816U, 240254656U, 240385856U, 240516928U, 240647872U, 240779072U,
|
||||
240909632U, 241040704U, 241171904U, 241302848U, 241433408U, 241565248U,
|
||||
241696192U, 241825984U, 241958848U, 242088256U, 242220224U, 242352064U,
|
||||
242481856U, 242611648U, 242744896U, 242876224U, 243005632U, 243138496U,
|
||||
243268672U, 243400384U, 243531712U, 243662656U, 243793856U, 243924544U,
|
||||
244054592U, 244187072U, 244316608U, 244448704U, 244580032U, 244710976U,
|
||||
244841536U, 244972864U, 245104448U, 245233984U, 245365312U, 245497792U,
|
||||
245628736U, 245759936U, 245889856U, 246021056U, 246152512U, 246284224U,
|
||||
246415168U, 246545344U, 246675904U, 246808384U, 246939584U, 247070144U,
|
||||
247199552U, 247331648U, 247463872U, 247593536U, 247726016U, 247857088U,
|
||||
247987648U, 248116928U, 248249536U, 248380736U, 248512064U, 248643008U,
|
||||
248773312U, 248901056U, 249036608U, 249167552U, 249298624U, 249429184U,
|
||||
249560512U, 249692096U, 249822784U, 249954112U, 250085312U, 250215488U,
|
||||
250345792U, 250478528U, 250608704U, 250739264U, 250870976U, 251002816U,
|
||||
251133632U, 251263552U, 251395136U, 251523904U, 251657792U, 251789248U,
|
||||
251919424U, 252051392U, 252182464U, 252313408U, 252444224U, 252575552U,
|
||||
252706624U, 252836032U, 252968512U, 253099712U, 253227584U, 253361728U,
|
||||
253493056U, 253623488U, 253754432U, 253885504U, 254017216U, 254148032U,
|
||||
254279488U, 254410432U, 254541376U, 254672576U, 254803264U, 254933824U,
|
||||
255065792U, 255196736U, 255326528U, 255458752U, 255589952U, 255721408U,
|
||||
255851072U, 255983296U, 256114624U, 256244416U, 256374208U, 256507712U,
|
||||
256636096U, 256768832U, 256900544U, 257031616U, 257162176U, 257294272U,
|
||||
257424448U, 257555776U, 257686976U, 257818432U, 257949632U, 258079552U,
|
||||
258211136U, 258342464U, 258473408U, 258603712U, 258734656U, 258867008U,
|
||||
258996544U, 259127744U, 259260224U, 259391296U, 259522112U, 259651904U,
|
||||
259784384U, 259915328U, 260045888U, 260175424U, 260308544U, 260438336U,
|
||||
260570944U, 260700992U, 260832448U, 260963776U, 261092672U, 261226304U,
|
||||
261356864U, 261487936U, 261619648U, 261750592U, 261879872U, 262011968U,
|
||||
262143424U, 262274752U, 262404416U, 262537024U, 262667968U, 262799296U,
|
||||
262928704U, 263061184U, 263191744U, 263322944U, 263454656U, 263585216U,
|
||||
263716672U, 263847872U, 263978944U, 264108608U, 264241088U, 264371648U,
|
||||
264501184U, 264632768U, 264764096U, 264895936U, 265024576U, 265158464U,
|
||||
265287488U, 265418432U, 265550528U, 265681216U, 265813312U, 265943488U,
|
||||
266075968U, 266206144U, 266337728U, 266468032U, 266600384U, 266731072U,
|
||||
266862272U, 266993344U, 267124288U, 267255616U, 267386432U, 267516992U,
|
||||
267648704U, 267777728U, 267910592U, 268040512U, 268172096U, 268302784U,
|
||||
268435264U, 268566208U, 268696256U, 268828096U, 268959296U, 269090368U,
|
||||
269221312U, 269352256U, 269482688U, 269614784U, 269745856U, 269876416U,
|
||||
270007616U, 270139328U, 270270272U, 270401216U, 270531904U, 270663616U,
|
||||
270791744U, 270924736U, 271056832U, 271186112U, 271317184U, 271449536U,
|
||||
271580992U, 271711936U, 271843136U, 271973056U, 272105408U, 272236352U,
|
||||
272367296U, 272498368U, 272629568U, 272759488U, 272891456U, 273022784U,
|
||||
273153856U, 273284672U, 273415616U, 273547072U, 273677632U, 273808448U,
|
||||
273937088U, 274071488U, 274200896U, 274332992U, 274463296U, 274595392U,
|
||||
274726208U, 274857536U, 274988992U, 275118656U, 275250496U, 275382208U,
|
||||
275513024U, 275643968U, 275775296U, 275906368U, 276037184U, 276167872U,
|
||||
276297664U, 276429376U, 276560576U, 276692672U, 276822976U, 276955072U,
|
||||
277085632U, 277216832U, 277347008U, 277478848U, 277609664U, 277740992U,
|
||||
277868608U, 278002624U, 278134336U, 278265536U, 278395328U, 278526784U,
|
||||
278657728U, 278789824U, 278921152U, 279052096U, 279182912U, 279313088U,
|
||||
279443776U, 279576256U, 279706048U, 279838528U, 279969728U, 280099648U,
|
||||
280230976U, 280361408U, 280493632U, 280622528U, 280755392U, 280887104U,
|
||||
281018176U, 281147968U, 281278912U, 281411392U, 281542592U, 281673152U,
|
||||
281803712U, 281935552U, 282066496U, 282197312U, 282329024U, 282458816U,
|
||||
282590272U, 282720832U, 282853184U, 282983744U, 283115072U, 283246144U,
|
||||
283377344U, 283508416U, 283639744U, 283770304U, 283901504U, 284032576U,
|
||||
284163136U, 284294848U, 284426176U, 284556992U, 284687296U, 284819264U,
|
||||
284950208U, 285081536U
|
||||
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
|
@@ -1,78 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <stdint.h>
|
||||
#include "compiler.h"
|
||||
|
||||
#if defined(__MINGW32__) || defined(_WIN32)
|
||||
# define LITTLE_ENDIAN 1234
|
||||
# define BYTE_ORDER LITTLE_ENDIAN
|
||||
#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
|
||||
# include <sys/endian.h>
|
||||
#elif defined(__OpenBSD__) || defined(__SVR4)
|
||||
# include <sys/types.h>
|
||||
#elif defined(__APPLE__)
|
||||
# include <machine/endian.h>
|
||||
#elif defined( BSD ) && (BSD >= 199103)
|
||||
# include <machine/endian.h>
|
||||
#elif defined( __QNXNTO__ ) && defined( __LITTLEENDIAN__ )
|
||||
# define LITTLE_ENDIAN 1234
|
||||
# define BYTE_ORDER LITTLE_ENDIAN
|
||||
#elif defined( __QNXNTO__ ) && defined( __BIGENDIAN__ )
|
||||
# define BIG_ENDIAN 1234
|
||||
# define BYTE_ORDER BIG_ENDIAN
|
||||
#else
|
||||
# include <endian.h>
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32)
|
||||
#include <stdlib.h>
|
||||
#define ethash_swap_u32(input_) _byteswap_ulong(input_)
|
||||
#define ethash_swap_u64(input_) _byteswap_uint64(input_)
|
||||
#elif defined(__APPLE__)
|
||||
#include <libkern/OSByteOrder.h>
|
||||
#define ethash_swap_u32(input_) OSSwapInt32(input_)
|
||||
#define ethash_swap_u64(input_) OSSwapInt64(input_)
|
||||
#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
|
||||
#define ethash_swap_u32(input_) bswap32(input_)
|
||||
#define ethash_swap_u64(input_) bswap64(input_)
|
||||
#elif defined(__OpenBSD__)
|
||||
#include <endian.h>
|
||||
#define ethash_swap_u32(input_) swap32(input_)
|
||||
#define ethash_swap_u64(input_) swap64(input_)
|
||||
#else // posix
|
||||
#include <byteswap.h>
|
||||
#define ethash_swap_u32(input_) bswap_32(input_)
|
||||
#define ethash_swap_u64(input_) bswap_64(input_)
|
||||
#endif
|
||||
|
||||
|
||||
#if LITTLE_ENDIAN == BYTE_ORDER
|
||||
|
||||
#define fix_endian32(dst_ ,src_) dst_ = src_
|
||||
#define fix_endian32_same(val_)
|
||||
#define fix_endian64(dst_, src_) dst_ = src_
|
||||
#define fix_endian64_same(val_)
|
||||
#define fix_endian_arr32(arr_, size_)
|
||||
#define fix_endian_arr64(arr_, size_)
|
||||
|
||||
#elif BIG_ENDIAN == BYTE_ORDER
|
||||
|
||||
#define fix_endian32(dst_, src_) dst_ = ethash_swap_u32(src_)
|
||||
#define fix_endian32_same(val_) val_ = ethash_swap_u32(val_)
|
||||
#define fix_endian64(dst_, src_) dst_ = ethash_swap_u64(src_)
|
||||
#define fix_endian64_same(val_) val_ = ethash_swap_u64(val_)
|
||||
#define fix_endian_arr32(arr_, size_) \
|
||||
do { \
|
||||
for (unsigned i_ = 0; i_ < (size_); ++i_) { \
|
||||
arr_[i_] = ethash_swap_u32(arr_[i_]); \
|
||||
} \
|
||||
} while (0)
|
||||
#define fix_endian_arr64(arr_, size_) \
|
||||
do { \
|
||||
for (unsigned i_ = 0; i_ < (size_); ++i_) { \
|
||||
arr_[i_] = ethash_swap_u64(arr_[i_]); \
|
||||
} \
|
||||
} while (0)
|
||||
#else
|
||||
# error "endian not supported"
|
||||
#endif // BYTE_ORDER
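A short usage sketch of the helpers above: producing the little-endian byte representation of a 64-bit nonce regardless of host byte order (the function is illustrative and assumes <string.h> for memcpy; it is not part of the original header):

// Sketch: serialize a nonce as little-endian bytes using fix_endian64.
static void example_pack_nonce_le(uint8_t out[8], uint64_t nonce)
{
	uint64_t tmp;
	fix_endian64(tmp, nonce); // identity on little-endian hosts, byteswap on big-endian ones
	memcpy(out, &tmp, sizeof(tmp));
}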
|
|
@@ -1,147 +0,0 @@
|
|||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
/** @file ethash.h
|
||||
* @date 2015
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <string.h>
|
||||
#include <stddef.h>
|
||||
#include "compiler.h"
|
||||
|
||||
#define ETHASH_REVISION 23
|
||||
#define ETHASH_DATASET_BYTES_INIT 1073741824U // 2**30
|
||||
#define ETHASH_DATASET_BYTES_GROWTH 8388608U // 2**23
|
||||
#define ETHASH_CACHE_BYTES_INIT 1073741824U // 2**30
|
||||
#define ETHASH_CACHE_BYTES_GROWTH 131072U // 2**17
|
||||
#define ETHASH_EPOCH_LENGTH 30000U
|
||||
#define ETHASH_MIX_BYTES 128
|
||||
#define ETHASH_HASH_BYTES 64
|
||||
#define ETHASH_DATASET_PARENTS 256
|
||||
#define ETHASH_CACHE_ROUNDS 3
|
||||
#define ETHASH_ACCESSES 64
|
||||
#define ETHASH_DAG_MAGIC_NUM_SIZE 8
|
||||
#define ETHASH_DAG_MAGIC_NUM 0xFEE1DEADBADDCAFE
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/// Type of a seedhash/blockhash etc.
|
||||
typedef struct ethash_h256 { uint8_t b[32]; } ethash_h256_t;
|
||||
|
||||
// convenience macro to statically initialize an h256_t
|
||||
// usage:
|
||||
// ethash_h256_t a = ethash_h256_static_init(1, 2, 3, ... )
|
||||
// You have to provide all 32 values. If you don't provide all, the rest
|
||||
// will simply be uninitialized (not guaranteed to be 0).
|
||||
#define ethash_h256_static_init(...) \
|
||||
{ {__VA_ARGS__} }
|
||||
|
||||
struct ethash_light;
|
||||
typedef struct ethash_light* ethash_light_t;
|
||||
struct ethash_full;
|
||||
typedef struct ethash_full* ethash_full_t;
|
||||
typedef int(*ethash_callback_t)(unsigned);
|
||||
|
||||
typedef struct ethash_return_value {
|
||||
ethash_h256_t result;
|
||||
ethash_h256_t mix_hash;
|
||||
bool success;
|
||||
} ethash_return_value_t;
|
||||
|
||||
/**
|
||||
* Allocate and initialize a new ethash_light handler
|
||||
*
|
||||
* @param block_number The block number for which to create the handler
|
||||
* @return Newly allocated ethash_light handler or NULL in case of
|
||||
* ENOMEM or invalid parameters used for @ref ethash_compute_cache_nodes()
|
||||
*/
|
||||
ethash_light_t ethash_light_new(uint64_t block_number);
|
||||
/**
|
||||
* Frees a previously allocated ethash_light handler
|
||||
* @param light The light handler to free
|
||||
*/
|
||||
void ethash_light_delete(ethash_light_t light);
|
||||
/**
|
||||
* Calculate the light client data
|
||||
*
|
||||
* @param light The light client handler
|
||||
* @param header_hash The header hash to pack into the mix
|
||||
* @param nonce The nonce to pack into the mix
|
||||
* @return an object of ethash_return_value_t holding the return values
|
||||
*/
|
||||
ethash_return_value_t ethash_light_compute(
|
||||
ethash_light_t light,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
);
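Taken together with ethash_light_new() and ethash_light_delete() above, a typical light-client verification sequence looks roughly like the sketch below; block_number, header_hash and nonce are assumed to come from the block being verified, and the helper name is illustrative:

// Sketch: light verification of a single (header_hash, nonce) pair.
static bool example_light_verify(uint64_t block_number, ethash_h256_t header_hash, uint64_t nonce, ethash_return_value_t* out)
{
	ethash_light_t light = ethash_light_new(block_number);
	if (light == NULL) {
		return false;
	}
	*out = ethash_light_compute(light, header_hash, nonce);
	ethash_light_delete(light);
	// out->result is compared against the target boundary by the caller;
	// out->mix_hash must match the mix hash carried in the block header.
	return out->success;
}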
|
||||
|
||||
/**
|
||||
* Allocate and initialize a new ethash_full handler
|
||||
*
|
||||
* @param light The light handler containing the cache.
|
||||
* @param callback A callback function with signature of @ref ethash_callback_t
|
||||
* It accepts an unsigned value with which the progress of the DAG calculation
|
||||
* can be displayed. If all goes well the callback should return 0.
|
||||
* If a non-zero value is returned then DAG generation will stop.
|
||||
* Be advised. A progress value of 100 means that DAG creation is
|
||||
* almost complete and that this function will soon return successfully.
|
||||
* It does not mean that the function has already had a successful return.
|
||||
* @return Newly allocated ethash_full handler or NULL in case of
|
||||
* ENOMEM or invalid parameters used for @ref ethash_compute_full_data()
|
||||
*/
|
||||
ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback);
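A minimal sketch of an ethash_callback_t implementation and how it would be passed in (illustrative only; it assumes <stdio.h> for printf and an already-created ethash_light_t handle):

// Sketch: progress callback for DAG generation.
static int example_progress_cb(unsigned pct)
{
	printf("ethash DAG generation: %u%%\n", pct);
	return 0; // returning non-zero would abort DAG generation
}
// Usage, given an ethash_light_t `light`:
//   ethash_full_t full = ethash_full_new(light, example_progress_cb);
//   if (full) { /* use ethash_full_compute()/ethash_full_dag(), then ethash_full_delete(full) */ }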
|
||||
|
||||
/**
|
||||
* Frees a previously allocated ethash_full handler
|
||||
* @param full The full handler to free
|
||||
*/
|
||||
void ethash_full_delete(ethash_full_t full);
|
||||
/**
|
||||
* Calculate the full client data
|
||||
*
|
||||
* @param full The full client handler
|
||||
* @param header_hash The header hash to pack into the mix
|
||||
* @param nonce The nonce to pack into the mix
|
||||
* @return An object of ethash_return_value_t holding the return values
|
||||
*/
|
||||
ethash_return_value_t ethash_full_compute(
|
||||
ethash_full_t full,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
);
|
||||
/**
|
||||
* Get a pointer to the full DAG data
|
||||
*/
|
||||
void const* ethash_full_dag(ethash_full_t full);
|
||||
/**
|
||||
* Get the size of the DAG data
|
||||
*/
|
||||
uint64_t ethash_full_dag_size(ethash_full_t full);
|
||||
|
||||
/**
|
||||
* Calculate the seedhash for a given block number
|
||||
*/
|
||||
ethash_h256_t ethash_get_seedhash(uint64_t block_number);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
|
@@ -1,43 +0,0 @@
|
|||
/*
|
||||
This file is part of cpp-ethereum.
|
||||
|
||||
cpp-ethereum is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
cpp-ethereum is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file fnv.h
|
||||
* @author Matthew Wampler-Doty <negacthulhu@gmail.com>
|
||||
* @date 2015
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include <stdint.h>
|
||||
#include "compiler.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define FNV_PRIME 0x01000193
|
||||
|
||||
/* The FNV-1 spec multiplies the prime by the input one byte (octet) at a time.
|
||||
We instead multiply it with the full 32-bit input.
|
||||
This gives a different result compared to a canonical FNV-1 implementation.
|
||||
*/
|
||||
static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
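For example, the four-word reduction used when compressing the mix in internal.c is just three chained fnv_hash calls (an illustrative helper, not part of the original header):

// Sketch: the same reduction pattern ethash uses when compressing the mix.
static uint32_t example_fnv_reduce(uint32_t const w[4])
{
	uint32_t r = w[0];
	r = fnv_hash(r, w[1]);
	r = fnv_hash(r, w[2]);
	r = fnv_hash(r, w[3]);
	return r;
}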
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
|
@@ -1,507 +0,0 @@
|
|||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file internal.c
|
||||
* @author Tim Hughes <tim@twistedfury.com>
|
||||
* @author Matthew Wampler-Doty
|
||||
* @date 2015
|
||||
*/
|
||||
|
||||
#include <assert.h>
|
||||
#include <inttypes.h>
|
||||
#include <stddef.h>
|
||||
#include <errno.h>
|
||||
#include <math.h>
|
||||
#include "mmap.h"
|
||||
#include "ethash.h"
|
||||
#include "fnv.h"
|
||||
#include "endian.h"
|
||||
#include "internal.h"
|
||||
#include "data_sizes.h"
|
||||
#include "io.h"
|
||||
|
||||
#ifdef WITH_CRYPTOPP
|
||||
|
||||
#include "sha3_cryptopp.h"
|
||||
|
||||
#else
|
||||
#include "sha3.h"
|
||||
#endif // WITH_CRYPTOPP
|
||||
|
||||
uint64_t ethash_get_datasize(uint64_t const block_number)
|
||||
{
|
||||
assert(block_number / ETHASH_EPOCH_LENGTH < 2048);
|
||||
return dag_sizes[block_number / ETHASH_EPOCH_LENGTH];
|
||||
}
|
||||
|
||||
uint64_t ethash_get_cachesize(uint64_t const block_number)
|
||||
{
|
||||
assert(block_number / ETHASH_EPOCH_LENGTH < 2048);
|
||||
return cache_sizes[block_number / ETHASH_EPOCH_LENGTH];
|
||||
}
|
||||
|
||||
// Follows Sergio Demian Lerner's "STRICT MEMORY HARD HASHING FUNCTIONS" (2014)
|
||||
// https://bitslog.files.wordpress.com/2013/12/memohash-v0-3.pdf
|
||||
// SeqMemoHash(s, R, N)
|
||||
static bool ethash_compute_cache_nodes(
|
||||
node* const nodes,
|
||||
uint64_t cache_size,
|
||||
ethash_h256_t const* seed
|
||||
)
|
||||
{
|
||||
if (cache_size % sizeof(node) != 0) {
|
||||
return false;
|
||||
}
|
||||
uint32_t const num_nodes = (uint32_t) (cache_size / sizeof(node));
|
||||
|
||||
SHA3_512(nodes[0].bytes, (uint8_t*)seed, 32);
|
||||
|
||||
for (uint32_t i = 1; i != num_nodes; ++i) {
|
||||
SHA3_512(nodes[i].bytes, nodes[i - 1].bytes, 64);
|
||||
}
|
||||
|
||||
for (uint32_t j = 0; j != ETHASH_CACHE_ROUNDS; j++) {
|
||||
for (uint32_t i = 0; i != num_nodes; i++) {
|
||||
uint32_t const idx = nodes[i].words[0] % num_nodes;
|
||||
node data;
|
||||
data = nodes[(num_nodes - 1 + i) % num_nodes];
|
||||
for (uint32_t w = 0; w != NODE_WORDS; ++w) {
|
||||
data.words[w] ^= nodes[idx].words[w];
|
||||
}
|
||||
SHA3_512(nodes[i].bytes, data.bytes, sizeof(data));
|
||||
}
|
||||
}
|
||||
|
||||
// now perform endian conversion
|
||||
fix_endian_arr32(nodes->words, num_nodes * NODE_WORDS);
|
||||
return true;
|
||||
}
|
||||
|
||||
void ethash_calculate_dag_item(
|
||||
node* const ret,
|
||||
uint32_t node_index,
|
||||
ethash_light_t const light
|
||||
)
|
||||
{
|
||||
uint32_t num_parent_nodes = (uint32_t) (light->cache_size / sizeof(node));
|
||||
node const* cache_nodes = (node const *) light->cache;
|
||||
node const* init = &cache_nodes[node_index % num_parent_nodes];
|
||||
memcpy(ret, init, sizeof(node));
|
||||
ret->words[0] ^= node_index;
|
||||
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
__m128i const fnv_prime = _mm_set1_epi32(FNV_PRIME);
|
||||
__m128i xmm0 = ret->xmm[0];
|
||||
__m128i xmm1 = ret->xmm[1];
|
||||
__m128i xmm2 = ret->xmm[2];
|
||||
__m128i xmm3 = ret->xmm[3];
|
||||
#endif
|
||||
|
||||
for (uint32_t i = 0; i != ETHASH_DATASET_PARENTS; ++i) {
|
||||
uint32_t parent_index = fnv_hash(node_index ^ i, ret->words[i % NODE_WORDS]) % num_parent_nodes;
|
||||
node const *parent = &cache_nodes[parent_index];
|
||||
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
{
|
||||
xmm0 = _mm_mullo_epi32(xmm0, fnv_prime);
|
||||
xmm1 = _mm_mullo_epi32(xmm1, fnv_prime);
|
||||
xmm2 = _mm_mullo_epi32(xmm2, fnv_prime);
|
||||
xmm3 = _mm_mullo_epi32(xmm3, fnv_prime);
|
||||
xmm0 = _mm_xor_si128(xmm0, parent->xmm[0]);
|
||||
xmm1 = _mm_xor_si128(xmm1, parent->xmm[1]);
|
||||
xmm2 = _mm_xor_si128(xmm2, parent->xmm[2]);
|
||||
xmm3 = _mm_xor_si128(xmm3, parent->xmm[3]);
|
||||
|
||||
// have to write to ret as values are used to compute index
|
||||
ret->xmm[0] = xmm0;
|
||||
ret->xmm[1] = xmm1;
|
||||
ret->xmm[2] = xmm2;
|
||||
ret->xmm[3] = xmm3;
|
||||
}
|
||||
#else
|
||||
{
|
||||
for (unsigned w = 0; w != NODE_WORDS; ++w) {
|
||||
ret->words[w] = fnv_hash(ret->words[w], parent->words[w]);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
|
||||
}
|
||||
|
||||
bool ethash_compute_full_data(
|
||||
void* mem,
|
||||
uint64_t full_size,
|
||||
ethash_light_t const light,
|
||||
ethash_callback_t callback
|
||||
)
|
||||
{
|
||||
if (full_size % (sizeof(uint32_t) * MIX_WORDS) != 0 ||
|
||||
(full_size % sizeof(node)) != 0) {
|
||||
return false;
|
||||
}
|
||||
uint32_t const max_n = (uint32_t)(full_size / sizeof(node));
|
||||
node* full_nodes = mem;
|
||||
double const progress_change = 1.0f / max_n;
|
||||
double progress = 0.0f;
|
||||
// now compute full nodes
|
||||
for (uint32_t n = 0; n != max_n; ++n) {
|
||||
if (callback &&
|
||||
n % (max_n / 100) == 0 &&
|
||||
callback((unsigned int)(ceil(progress * 100.0f))) != 0) {
|
||||
|
||||
return false;
|
||||
}
|
||||
progress += progress_change;
|
||||
ethash_calculate_dag_item(&(full_nodes[n]), n, light);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool ethash_hash(
|
||||
ethash_return_value_t* ret,
|
||||
node const* full_nodes,
|
||||
ethash_light_t const light,
|
||||
uint64_t full_size,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t const nonce
|
||||
)
|
||||
{
|
||||
if (full_size % MIX_WORDS != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// pack hash and nonce together into first 40 bytes of s_mix
|
||||
assert(sizeof(node) * 8 == 512);
|
||||
node s_mix[MIX_NODES + 1];
|
||||
memcpy(s_mix[0].bytes, &header_hash, 32);
|
||||
fix_endian64(s_mix[0].double_words[4], nonce);
|
||||
|
||||
// compute sha3-512 hash and replicate across mix
|
||||
SHA3_512(s_mix->bytes, s_mix->bytes, 40);
|
||||
fix_endian_arr32(s_mix[0].words, 16);
|
||||
|
||||
node* const mix = s_mix + 1;
|
||||
for (uint32_t w = 0; w != MIX_WORDS; ++w) {
|
||||
mix->words[w] = s_mix[0].words[w % NODE_WORDS];
|
||||
}
|
||||
|
||||
unsigned const page_size = sizeof(uint32_t) * MIX_WORDS;
|
||||
unsigned const num_full_pages = (unsigned) (full_size / page_size);
|
||||
|
||||
for (unsigned i = 0; i != ETHASH_ACCESSES; ++i) {
|
||||
uint32_t const index = fnv_hash(s_mix->words[0] ^ i, mix->words[i % MIX_WORDS]) % num_full_pages;
|
||||
|
||||
for (unsigned n = 0; n != MIX_NODES; ++n) {
|
||||
node const* dag_node;
|
||||
if (full_nodes) {
|
||||
dag_node = &full_nodes[MIX_NODES * index + n];
|
||||
} else {
|
||||
node tmp_node;
|
||||
ethash_calculate_dag_item(&tmp_node, index * MIX_NODES + n, light);
|
||||
dag_node = &tmp_node;
|
||||
}
|
||||
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
{
|
||||
__m128i fnv_prime = _mm_set1_epi32(FNV_PRIME);
|
||||
__m128i xmm0 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[0]);
|
||||
__m128i xmm1 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[1]);
|
||||
__m128i xmm2 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[2]);
|
||||
__m128i xmm3 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[3]);
|
||||
mix[n].xmm[0] = _mm_xor_si128(xmm0, dag_node->xmm[0]);
|
||||
mix[n].xmm[1] = _mm_xor_si128(xmm1, dag_node->xmm[1]);
|
||||
mix[n].xmm[2] = _mm_xor_si128(xmm2, dag_node->xmm[2]);
|
||||
mix[n].xmm[3] = _mm_xor_si128(xmm3, dag_node->xmm[3]);
|
||||
}
|
||||
#else
|
||||
{
|
||||
for (unsigned w = 0; w != NODE_WORDS; ++w) {
|
||||
mix[n].words[w] = fnv_hash(mix[n].words[w], dag_node->words[w]);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// compress mix
|
||||
for (uint32_t w = 0; w != MIX_WORDS; w += 4) {
|
||||
uint32_t reduction = mix->words[w + 0];
|
||||
reduction = reduction * FNV_PRIME ^ mix->words[w + 1];
|
||||
reduction = reduction * FNV_PRIME ^ mix->words[w + 2];
|
||||
reduction = reduction * FNV_PRIME ^ mix->words[w + 3];
|
||||
mix->words[w / 4] = reduction;
|
||||
}
|
||||
|
||||
fix_endian_arr32(mix->words, MIX_WORDS / 4);
|
||||
memcpy(&ret->mix_hash, mix->bytes, 32);
|
||||
// final Keccak hash
|
||||
SHA3_256(&ret->result, s_mix->bytes, 64 + 32); // Keccak-256(s + compressed_mix)
|
||||
return true;
|
||||
}
|
||||
|
||||
void ethash_quick_hash(
|
||||
ethash_h256_t* return_hash,
|
||||
ethash_h256_t const* header_hash,
|
||||
uint64_t nonce,
|
||||
ethash_h256_t const* mix_hash
|
||||
)
|
||||
{
|
||||
uint8_t buf[64 + 32];
|
||||
memcpy(buf, header_hash, 32);
|
||||
fix_endian64_same(nonce);
|
||||
memcpy(&(buf[32]), &nonce, 8);
|
||||
SHA3_512(buf, buf, 40);
|
||||
memcpy(&(buf[64]), mix_hash, 32);
|
||||
SHA3_256(return_hash, buf, 64 + 32);
|
||||
}
|
||||
|
||||
ethash_h256_t ethash_get_seedhash(uint64_t block_number)
|
||||
{
|
||||
ethash_h256_t ret;
|
||||
ethash_h256_reset(&ret);
|
||||
uint64_t const epochs = block_number / ETHASH_EPOCH_LENGTH;
|
||||
for (uint32_t i = 0; i < epochs; ++i)
|
||||
SHA3_256(&ret, (uint8_t*)&ret, 32);
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool ethash_quick_check_difficulty(
|
||||
ethash_h256_t const* header_hash,
|
||||
uint64_t const nonce,
|
||||
ethash_h256_t const* mix_hash,
|
||||
ethash_h256_t const* boundary
|
||||
)
|
||||
{
|
||||
|
||||
ethash_h256_t return_hash;
|
||||
ethash_quick_hash(&return_hash, header_hash, nonce, mix_hash);
|
||||
return ethash_check_difficulty(&return_hash, boundary);
|
||||
}
|
||||
|
||||
ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed)
|
||||
{
|
||||
struct ethash_light *ret;
|
||||
ret = calloc(sizeof(*ret), 1);
|
||||
if (!ret) {
|
||||
return NULL;
|
||||
}
|
||||
ret->cache = malloc((size_t)cache_size);
|
||||
if (!ret->cache) {
|
||||
goto fail_free_light;
|
||||
}
|
||||
node* nodes = (node*)ret->cache;
|
||||
if (!ethash_compute_cache_nodes(nodes, cache_size, seed)) {
|
||||
goto fail_free_cache_mem;
|
||||
}
|
||||
ret->cache_size = cache_size;
|
||||
return ret;
|
||||
|
||||
fail_free_cache_mem:
|
||||
free(ret->cache);
|
||||
fail_free_light:
|
||||
free(ret);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ethash_light_t ethash_light_new(uint64_t block_number)
|
||||
{
|
||||
ethash_h256_t seedhash = ethash_get_seedhash(block_number);
|
||||
ethash_light_t ret;
|
||||
ret = ethash_light_new_internal(ethash_get_cachesize(block_number), &seedhash);
|
||||
ret->block_number = block_number;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void ethash_light_delete(ethash_light_t light)
|
||||
{
|
||||
if (light->cache) {
|
||||
free(light->cache);
|
||||
}
|
||||
free(light);
|
||||
}
|
||||
|
||||
ethash_return_value_t ethash_light_compute_internal(
|
||||
ethash_light_t light,
|
||||
uint64_t full_size,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
)
|
||||
{
|
||||
ethash_return_value_t ret;
|
||||
ret.success = true;
|
||||
if (!ethash_hash(&ret, NULL, light, full_size, header_hash, nonce)) {
|
||||
ret.success = false;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
ethash_return_value_t ethash_light_compute(
|
||||
ethash_light_t light,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
)
|
||||
{
|
||||
uint64_t full_size = ethash_get_datasize(light->block_number);
|
||||
return ethash_light_compute_internal(light, full_size, header_hash, nonce);
|
||||
}
|
||||
|
||||
static bool ethash_mmap(struct ethash_full* ret, FILE* f)
|
||||
{
|
||||
int fd;
|
||||
char* mmapped_data;
|
||||
errno = 0;
|
||||
ret->file = f;
|
||||
if ((fd = ethash_fileno(ret->file)) == -1) {
|
||||
return false;
|
||||
}
|
||||
mmapped_data = mmap(
|
||||
NULL,
|
||||
(size_t)ret->file_size + ETHASH_DAG_MAGIC_NUM_SIZE,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
fd,
|
||||
0
|
||||
);
|
||||
if (mmapped_data == MAP_FAILED) {
|
||||
return false;
|
||||
}
|
||||
ret->data = (node*)(mmapped_data + ETHASH_DAG_MAGIC_NUM_SIZE);
|
||||
return true;
|
||||
}
|
||||
|
||||
ethash_full_t ethash_full_new_internal(
|
||||
char const* dirname,
|
||||
ethash_h256_t const seed_hash,
|
||||
uint64_t full_size,
|
||||
ethash_light_t const light,
|
||||
ethash_callback_t callback
|
||||
)
|
||||
{
|
||||
struct ethash_full* ret;
|
||||
FILE *f = NULL;
|
||||
ret = calloc(sizeof(*ret), 1);
|
||||
if (!ret) {
|
||||
return NULL;
|
||||
}
|
||||
ret->file_size = (size_t)full_size;
|
||||
switch (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, false)) {
|
||||
case ETHASH_IO_FAIL:
|
||||
// ethash_io_prepare will do all ETHASH_CRITICAL() logging in fail case
|
||||
goto fail_free_full;
|
||||
case ETHASH_IO_MEMO_MATCH:
|
||||
if (!ethash_mmap(ret, f)) {
|
||||
ETHASH_CRITICAL("mmap failure()");
|
||||
goto fail_close_file;
|
||||
}
|
||||
return ret;
|
||||
case ETHASH_IO_MEMO_SIZE_MISMATCH:
|
||||
// if a DAG of same filename but unexpected size is found, silently force new file creation
|
||||
if (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, true) != ETHASH_IO_MEMO_MISMATCH) {
|
||||
ETHASH_CRITICAL("Could not recreate DAG file after finding existing DAG with unexpected size.");
|
||||
goto fail_free_full;
|
||||
}
|
||||
// fallthrough to the mismatch case here, DO NOT go through match
|
||||
case ETHASH_IO_MEMO_MISMATCH:
|
||||
if (!ethash_mmap(ret, f)) {
|
||||
ETHASH_CRITICAL("mmap failure()");
|
||||
goto fail_close_file;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (!ethash_compute_full_data(ret->data, full_size, light, callback)) {
|
||||
ETHASH_CRITICAL("Failure at computing DAG data.");
|
||||
goto fail_free_full_data;
|
||||
}
|
||||
|
||||
// after the DAG has been filled we finalize it by writing the magic number at the beginning
|
||||
if (fseek(f, 0, SEEK_SET) != 0) {
|
||||
ETHASH_CRITICAL("Could not seek to DAG file start to write magic number.");
|
||||
goto fail_free_full_data;
|
||||
}
|
||||
uint64_t const magic_num = ETHASH_DAG_MAGIC_NUM;
|
||||
if (fwrite(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
|
||||
ETHASH_CRITICAL("Could not write magic number to DAG's beginning.");
|
||||
goto fail_free_full_data;
|
||||
}
|
||||
if (fflush(f) != 0) {// make sure the magic number IS there
|
||||
ETHASH_CRITICAL("Could not flush memory mapped data to DAG file. Insufficient space?");
|
||||
goto fail_free_full_data;
|
||||
}
|
||||
return ret;
|
||||
|
||||
fail_free_full_data:
|
||||
// could check that munmap(..) == 0 but even if it did not, we can't really do anything here
|
||||
munmap(ret->data, (size_t)full_size);
|
||||
fail_close_file:
|
||||
fclose(ret->file);
|
||||
fail_free_full:
|
||||
free(ret);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback)
|
||||
{
|
||||
char strbuf[256];
|
||||
if (!ethash_get_default_dirname(strbuf, 256)) {
|
||||
return NULL;
|
||||
}
|
||||
uint64_t full_size = ethash_get_datasize(light->block_number);
|
||||
ethash_h256_t seedhash = ethash_get_seedhash(light->block_number);
|
||||
return ethash_full_new_internal(strbuf, seedhash, full_size, light, callback);
|
||||
}
|
||||
|
||||
void ethash_full_delete(ethash_full_t full)
|
||||
{
|
||||
// could check that munmap(..) == 0 but even if it did not, we can't really do anything here
|
||||
munmap(full->data, (size_t)full->file_size);
|
||||
if (full->file) {
|
||||
fclose(full->file);
|
||||
}
|
||||
free(full);
|
||||
}
|
||||
|
||||
ethash_return_value_t ethash_full_compute(
|
||||
ethash_full_t full,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
)
|
||||
{
|
||||
ethash_return_value_t ret;
|
||||
ret.success = true;
|
||||
if (!ethash_hash(
|
||||
&ret,
|
||||
(node const*)full->data,
|
||||
NULL,
|
||||
full->file_size,
|
||||
header_hash,
|
||||
nonce)) {
|
||||
ret.success = false;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void const* ethash_full_dag(ethash_full_t full)
|
||||
{
|
||||
return full->data;
|
||||
}
|
||||
|
||||
uint64_t ethash_full_dag_size(ethash_full_t full)
|
||||
{
|
||||
return full->file_size;
|
||||
}
|
|
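The deleted internal.c above defines the light-client entry points: ethash_light_new builds the per-epoch cache, ethash_light_compute hashes a header/nonce pair against it, and ethash_light_delete releases it. A minimal verification sketch built only from those declarations follows; the verify_pow name, the boundary argument, and the include paths are illustrative assumptions, not part of this code base.

#include <stdbool.h>
#include "libethash/ethash.h"
#include "libethash/internal.h"  // assumed path; provides ethash_check_difficulty()

// Sketch: check one (header_hash, nonce) pair against a target boundary.
static bool verify_pow(
	uint64_t block_number,
	ethash_h256_t header_hash,
	uint64_t nonce,
	ethash_h256_t const* boundary
)
{
	ethash_light_t light = ethash_light_new(block_number);  // allocates and fills the epoch cache
	if (!light) {
		return false;
	}
	ethash_return_value_t r = ethash_light_compute(light, header_hash, nonce);
	bool ok = r.success && ethash_check_difficulty(&r.result, boundary);
	ethash_light_delete(light);  // frees the cache and the handle
	return ok;
}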
@ -1,179 +0,0 @@
|
|||
#pragma once
|
||||
#include "compiler.h"
|
||||
#include "endian.h"
|
||||
#include "ethash.h"
|
||||
#include <stdio.h>
|
||||
|
||||
#define ENABLE_SSE 0
|
||||
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
#include <smmintrin.h>
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// compile time settings
|
||||
#define NODE_WORDS (64/4)
|
||||
#define MIX_WORDS (ETHASH_MIX_BYTES/4)
|
||||
#define MIX_NODES (MIX_WORDS / NODE_WORDS)
|
||||
#include <stdint.h>
|
||||
|
||||
typedef union node {
|
||||
uint8_t bytes[NODE_WORDS * 4];
|
||||
uint32_t words[NODE_WORDS];
|
||||
uint64_t double_words[NODE_WORDS / 2];
|
||||
|
||||
#if defined(_M_X64) && ENABLE_SSE
|
||||
__m128i xmm[NODE_WORDS/4];
|
||||
#endif
|
||||
|
||||
} node;
|
||||
|
||||
static inline uint8_t ethash_h256_get(ethash_h256_t const* hash, unsigned int i)
|
||||
{
|
||||
return hash->b[i];
|
||||
}
|
||||
|
||||
static inline void ethash_h256_set(ethash_h256_t* hash, unsigned int i, uint8_t v)
|
||||
{
|
||||
hash->b[i] = v;
|
||||
}
|
||||
|
||||
static inline void ethash_h256_reset(ethash_h256_t* hash)
|
||||
{
|
||||
memset(hash, 0, 32);
|
||||
}
|
||||
|
||||
// Returns whether hash is less than or equal to the boundary (2^256/difficulty)
|
||||
static inline bool ethash_check_difficulty(
|
||||
ethash_h256_t const* hash,
|
||||
ethash_h256_t const* boundary
|
||||
)
|
||||
{
|
||||
// Boundary is big endian
|
||||
for (int i = 0; i < 32; i++) {
|
||||
if (ethash_h256_get(hash, i) == ethash_h256_get(boundary, i)) {
|
||||
continue;
|
||||
}
|
||||
return ethash_h256_get(hash, i) < ethash_h256_get(boundary, i);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Difficulty quick check for POW preverification
|
||||
*
|
||||
* @param header_hash The hash of the header
|
||||
* @param nonce The block's nonce
|
||||
* @param mix_hash The mix digest hash
|
||||
* @param boundary The boundary is defined as (2^256 / difficulty)
|
||||
* @return true for successful pre-verification and false otherwise
|
||||
*/
|
||||
bool ethash_quick_check_difficulty(
|
||||
ethash_h256_t const* header_hash,
|
||||
uint64_t const nonce,
|
||||
ethash_h256_t const* mix_hash,
|
||||
ethash_h256_t const* boundary
|
||||
);
|
||||
|
||||
struct ethash_light {
|
||||
void* cache;
|
||||
uint64_t cache_size;
|
||||
uint64_t block_number;
|
||||
};
|
||||
|
||||
/**
|
||||
* Allocate and initialize a new ethash_light handler. Internal version
|
||||
*
|
||||
* @param cache_size The size of the cache in bytes
|
||||
* @param seed Block seedhash to be used during the computation of the
|
||||
* cache nodes
|
||||
* @return Newly allocated ethash_light handler or NULL in case of
|
||||
* ERRNOMEM or invalid parameters used for @ref ethash_compute_cache_nodes()
|
||||
*/
|
||||
ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed);
|
||||
|
||||
/**
|
||||
* Calculate the light client data. Internal version.
|
||||
*
|
||||
* @param light The light client handler
|
||||
* @param full_size The size of the full data in bytes.
|
||||
* @param header_hash The header hash to pack into the mix
|
||||
* @param nonce The nonce to pack into the mix
|
||||
* @return The resulting hash.
|
||||
*/
|
||||
ethash_return_value_t ethash_light_compute_internal(
|
||||
ethash_light_t light,
|
||||
uint64_t full_size,
|
||||
ethash_h256_t const header_hash,
|
||||
uint64_t nonce
|
||||
);
|
||||
|
||||
struct ethash_full {
|
||||
FILE* file;
|
||||
uint64_t file_size;
|
||||
node* data;
|
||||
};
|
||||
|
||||
/**
|
||||
* Allocate and initialize a new ethash_full handler. Internal version.
|
||||
*
|
||||
* @param dirname The directory in which to put the DAG file.
|
||||
* @param seedhash The seed hash of the block. Used in the DAG file naming.
|
||||
* @param full_size The size of the full data in bytes.
|
||||
* @param cache A cache object to use that was allocated with @ref ethash_cache_new().
|
||||
* Iff this function succeeds the ethash_full_t will take memory
|
||||
* ownership of the cache and free it at deletion. If
|
||||
* not then the user still has to handle freeing of the cache himself.
|
||||
* @param callback A callback function with signature of @ref ethash_callback_t
|
||||
* It accepts an unsigned with which a progress of DAG calculation
|
||||
* can be displayed. If all goes well the callback should return 0.
|
||||
* If a non-zero value is returned then DAG generation will stop.
|
||||
* @return Newly allocated ethash_full handler or NULL in case of
|
||||
* ERRNOMEM or invalid parameters used for @ref ethash_compute_full_data()
|
||||
*/
|
||||
ethash_full_t ethash_full_new_internal(
|
||||
char const* dirname,
|
||||
ethash_h256_t const seed_hash,
|
||||
uint64_t full_size,
|
||||
ethash_light_t const light,
|
||||
ethash_callback_t callback
|
||||
);
|
||||
|
||||
void ethash_calculate_dag_item(
|
||||
node* const ret,
|
||||
uint32_t node_index,
|
||||
ethash_light_t const cache
|
||||
);
|
||||
|
||||
void ethash_quick_hash(
|
||||
ethash_h256_t* return_hash,
|
||||
ethash_h256_t const* header_hash,
|
||||
const uint64_t nonce,
|
||||
ethash_h256_t const* mix_hash
|
||||
);
|
||||
|
||||
uint64_t ethash_get_datasize(uint64_t const block_number);
|
||||
uint64_t ethash_get_cachesize(uint64_t const block_number);
|
||||
|
||||
/**
|
||||
* Compute the memory data for a full node's memory
|
||||
*
|
||||
* @param mem A pointer to an ethash full's memory
|
||||
* @param full_size The size of the full data in bytes
|
||||
* @param cache A cache object to use in the calculation
|
||||
* @param callback The callback function. Check @ref ethash_full_new() for details.
|
||||
* @return true if all went fine and false for invalid parameters
|
||||
*/
|
||||
bool ethash_compute_full_data(
|
||||
void* mem,
|
||||
uint64_t full_size,
|
||||
ethash_light_t const light,
|
||||
ethash_callback_t callback
|
||||
);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
|
@ -1,119 +0,0 @@
|
|||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file io.c
|
||||
* @author Lefteris Karapetsas <lefteris@ethdev.com>
|
||||
* @date 2015
|
||||
*/
|
||||
#include "io.h"
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
|
||||
enum ethash_io_rc ethash_io_prepare(
|
||||
char const* dirname,
|
||||
ethash_h256_t const seedhash,
|
||||
FILE** output_file,
|
||||
uint64_t file_size,
|
||||
bool force_create
|
||||
)
|
||||
{
|
||||
char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE];
|
||||
enum ethash_io_rc ret = ETHASH_IO_FAIL;
|
||||
// reset errno before io calls
|
||||
errno = 0;
|
||||
|
||||
// assert directory exists
|
||||
if (!ethash_mkdir(dirname)) {
|
||||
ETHASH_CRITICAL("Could not create the ethash directory");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ethash_io_mutable_name(ETHASH_REVISION, &seedhash, mutable_name);
|
||||
char* tmpfile = ethash_io_create_filename(dirname, mutable_name, strlen(mutable_name));
|
||||
if (!tmpfile) {
|
||||
ETHASH_CRITICAL("Could not create the full DAG pathname");
|
||||
goto end;
|
||||
}
|
||||
|
||||
FILE *f;
|
||||
if (!force_create) {
|
||||
// try to open the file
|
||||
f = ethash_fopen(tmpfile, "rb+");
|
||||
if (f) {
|
||||
size_t found_size;
|
||||
if (!ethash_file_size(f, &found_size)) {
|
||||
fclose(f);
|
||||
ETHASH_CRITICAL("Could not query size of DAG file: \"%s\"", tmpfile);
|
||||
goto free_memo;
|
||||
}
|
||||
if (file_size != found_size - ETHASH_DAG_MAGIC_NUM_SIZE) {
|
||||
fclose(f);
|
||||
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
|
||||
goto free_memo;
|
||||
}
|
||||
// compare the magic number, no need to care about endianness since it's local
|
||||
uint64_t magic_num;
|
||||
if (fread(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
|
||||
// I/O error
|
||||
fclose(f);
|
||||
ETHASH_CRITICAL("Could not read from DAG file: \"%s\"", tmpfile);
|
||||
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
|
||||
goto free_memo;
|
||||
}
|
||||
if (magic_num != ETHASH_DAG_MAGIC_NUM) {
|
||||
fclose(f);
|
||||
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
|
||||
goto free_memo;
|
||||
}
|
||||
ret = ETHASH_IO_MEMO_MATCH;
|
||||
goto set_file;
|
||||
}
|
||||
}
|
||||
|
||||
// file does not exist, will need to be created
|
||||
f = ethash_fopen(tmpfile, "wb+");
|
||||
if (!f) {
|
||||
ETHASH_CRITICAL("Could not create DAG file: \"%s\"", tmpfile);
|
||||
goto free_memo;
|
||||
}
|
||||
// make sure it's of the proper size
|
||||
if (fseek(f, (long int)(file_size + ETHASH_DAG_MAGIC_NUM_SIZE - 1), SEEK_SET) != 0) {
|
||||
fclose(f);
|
||||
ETHASH_CRITICAL("Could not seek to the end of DAG file: \"%s\". Insufficient space?", tmpfile);
|
||||
goto free_memo;
|
||||
}
|
||||
if (fputc('\n', f) == EOF) {
|
||||
fclose(f);
|
||||
ETHASH_CRITICAL("Could not write in the end of DAG file: \"%s\". Insufficient space?", tmpfile);
|
||||
goto free_memo;
|
||||
}
|
||||
if (fflush(f) != 0) {
|
||||
fclose(f);
|
||||
ETHASH_CRITICAL("Could not flush at end of DAG file: \"%s\". Insufficient space?", tmpfile);
|
||||
goto free_memo;
|
||||
}
|
||||
ret = ETHASH_IO_MEMO_MISMATCH;
|
||||
goto set_file;
|
||||
|
||||
ret = ETHASH_IO_MEMO_MATCH;
|
||||
set_file:
|
||||
*output_file = f;
|
||||
free_memo:
|
||||
free(tmpfile);
|
||||
end:
|
||||
return ret;
|
||||
}
|
|
@ -1,202 +0,0 @@
|
|||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file io.h
|
||||
* @author Lefteris Karapetsas <lefteris@ethdev.com>
|
||||
* @date 2015
|
||||
*/
|
||||
#pragma once
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#ifdef __cplusplus
|
||||
#define __STDC_FORMAT_MACROS 1
|
||||
#endif
|
||||
#include <inttypes.h>
|
||||
#include "endian.h"
|
||||
#include "ethash.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
// Maximum size for mutable part of DAG file name
|
||||
// 6 is for "full-R", the suffix of the filename
|
||||
// 10 is for maximum number of digits of a uint32_t (for REVISION)
|
||||
// 1 is for - and 16 is for the first 16 hex digits for first 8 bytes of
|
||||
// the seedhash and last 1 is for the null terminating character
|
||||
// Reference: https://github.com/ethereum/wiki/wiki/Ethash-DAG
|
||||
#define DAG_MUTABLE_NAME_MAX_SIZE (6 + 10 + 1 + 16 + 1)
|
||||
/// Possible return values of @see ethash_io_prepare
|
||||
enum ethash_io_rc {
|
||||
ETHASH_IO_FAIL = 0, ///< There has been an IO failure
|
||||
ETHASH_IO_MEMO_SIZE_MISMATCH, ///< DAG with revision/hash match, but file size was wrong.
|
||||
ETHASH_IO_MEMO_MISMATCH, ///< The DAG file did not exist or there was revision/hash mismatch
|
||||
ETHASH_IO_MEMO_MATCH, ///< DAG file existed and revision/hash matched. No need to do anything
|
||||
};
|
||||
|
||||
// small hack for windows. I don't feel I should use va_args and forward just
|
||||
// to have this one function properly cross-platform abstracted
|
||||
#if defined(_WIN32) && !defined(__GNUC__)
|
||||
#define snprintf(...) sprintf_s(__VA_ARGS__)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Logs a critical error in important parts of ethash. Should mostly help
|
||||
* figure out what kind of problem (I/O, memory, etc.) causes a NULL
|
||||
* ethash_full_t
|
||||
*/
|
||||
#ifdef ETHASH_PRINT_CRITICAL_OUTPUT
|
||||
#define ETHASH_CRITICAL(...) \
|
||||
do \
|
||||
{ \
|
||||
printf("ETHASH CRITICAL ERROR: "__VA_ARGS__); \
|
||||
printf("\n"); \
|
||||
fflush(stdout); \
|
||||
} while (0)
|
||||
#else
|
||||
#define ETHASH_CRITICAL(...)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Prepares io for ethash
|
||||
*
|
||||
* Create the DAG directory and the DAG file if they don't exist.
|
||||
*
|
||||
* @param[in] dirname A null terminated c-string of the path of the ethash
|
||||
* data directory. If it does not exist it's created.
|
||||
* @param[in] seedhash The seedhash of the current block number, used in the
|
||||
* naming of the file as can be seen from the spec at:
|
||||
* https://github.com/ethereum/wiki/wiki/Ethash-DAG
|
||||
* @param[out] output_file If there was no failure then this will point to an open
|
||||
* file descriptor. User is responsible for closing it.
|
||||
* In the case of a memo match the file is opened in read
|
||||
* mode, while in the case of a mismatch a new file is created
|
||||
* in write mode
|
||||
* @param[in] file_size The size that the DAG file should have on disk
|
||||
* @param[in] force_create If true then there is no check to see if the file
|
||||
* already exists
|
||||
* @return For possible return values @see enum ethash_io_rc
|
||||
*/
|
||||
enum ethash_io_rc ethash_io_prepare(
|
||||
char const* dirname,
|
||||
ethash_h256_t const seedhash,
|
||||
FILE** output_file,
|
||||
uint64_t file_size,
|
||||
bool force_create
|
||||
);
|
||||
|
||||
/**
|
||||
* An fopen wrapper for no-warnings crossplatform fopen.
|
||||
*
|
||||
* Msvc compiler considers fopen to be insecure and suggests to use their
|
||||
* alternative. This is a wrapper for this alternative. Another way is to
|
||||
* #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does
|
||||
* not sound like a good idea.
|
||||
*
|
||||
* @param file_name The path to the file to open
|
||||
* @param mode Opening mode. Check fopen()
|
||||
* @return The FILE* or NULL in failure
|
||||
*/
|
||||
FILE* ethash_fopen(char const* file_name, char const* mode);
|
||||
|
||||
/**
|
||||
* An strncat wrapper for no-warnings crossplatform strncat.
|
||||
*
|
||||
* Msvc compiler considers strncat to be insecure and suggests to use their
|
||||
* alternative. This is a wrapper for this alternative. Another way is to
|
||||
* #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does
|
||||
* not sound like a good idea.
|
||||
*
|
||||
* @param dest Destination buffer
|
||||
* @param dest_size Maximum size of the destination buffer. This is the
|
||||
* extra argument for the MSVC secure strncat
|
||||
* @param src Source buffer
|
||||
* @param count Number of bytes to copy from source
|
||||
* @return If all is well returns the dest buffer. If there is an
|
||||
* error returns NULL
|
||||
*/
|
||||
char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count);
|
||||
|
||||
/**
|
||||
* A cross-platform mkdir wrapper to create a directory or assert it's there
|
||||
*
|
||||
* @param dirname The full path of the directory to create
|
||||
* @return true if the directory was created or if it already
|
||||
* existed
|
||||
*/
|
||||
bool ethash_mkdir(char const* dirname);
|
||||
|
||||
/**
|
||||
* Get a file's size
|
||||
*
|
||||
* @param[in] f The open file stream whose size to get
|
||||
* @param[out] size Pass a size_t by reference to contain the file size
|
||||
* @return true in success and false if there was a failure
|
||||
*/
|
||||
bool ethash_file_size(FILE* f, size_t* ret_size);
|
||||
|
||||
/**
|
||||
* Get a file descriptor number from a FILE stream
|
||||
*
|
||||
* @param f The file stream whose fd to get
|
||||
* @return Platform specific fd handler
|
||||
*/
|
||||
int ethash_fileno(FILE* f);
|
||||
|
||||
/**
|
||||
* Create the filename for the DAG.
|
||||
*
|
||||
* @param dirname The directory name in which the DAG file should reside
|
||||
* If it does not end with a directory separator it is appended.
|
||||
* @param filename The actual name of the file
|
||||
* @param filename_length The length of the filename in bytes
|
||||
* @return A char* containing the full name. User must deallocate.
|
||||
*/
|
||||
char* ethash_io_create_filename(
|
||||
char const* dirname,
|
||||
char const* filename,
|
||||
size_t filename_length
|
||||
);
|
||||
|
||||
/**
|
||||
* Gets the default directory name for the DAG depending on the system
|
||||
*
|
||||
* The spec defining this directory is here: https://github.com/ethereum/wiki/wiki/Ethash-DAG
|
||||
*
|
||||
* @param[out] strbuf A string buffer of sufficient size to keep the
|
||||
* null terminated string of the directory name
|
||||
* @param[in] buffsize Size of @a strbuf in bytes
|
||||
* @return true for success and false otherwise
|
||||
*/
|
||||
bool ethash_get_default_dirname(char* strbuf, size_t buffsize);
|
||||
|
||||
static inline bool ethash_io_mutable_name(
|
||||
uint32_t revision,
|
||||
ethash_h256_t const* seed_hash,
|
||||
char* output
|
||||
)
|
||||
{
|
||||
uint64_t hash = *((uint64_t*)seed_hash);
|
||||
#if LITTLE_ENDIAN == BYTE_ORDER
|
||||
hash = ethash_swap_u64(hash);
|
||||
#endif
|
||||
return snprintf(output, DAG_MUTABLE_NAME_MAX_SIZE, "full-R%u-%016" PRIx64, revision, hash) >= 0;
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
|
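The DAG_MUTABLE_NAME_MAX_SIZE comment and ethash_io_mutable_name in the deleted io.h above define the mutable part of the DAG file name as "full-R" + revision + "-" + the first 8 bytes of the seed hash in hex. A small stand-alone sketch of that naming; the revision value 23, the seed bytes, and the include path are made-up placeholders, not values taken from this commit.

#include <stdio.h>
#include "libethash/io.h"  // assumed path; pulls in ethash.h and endian.h

int main(void)
{
	// Placeholder seed hash; only its first 8 bytes appear in the name.
	ethash_h256_t seed = { .b = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 } };
	char name[DAG_MUTABLE_NAME_MAX_SIZE];
	// For these inputs this prints "full-R23-123456789abcdef0".
	if (ethash_io_mutable_name(23, &seed, name)) {
		printf("%s\n", name);
	}
	return 0;
}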
@ -1,111 +0,0 @@
|
|||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file io_posix.c
|
||||
* @author Lefteris Karapetsas <lefteris@ethdev.com>
|
||||
* @date 2015
|
||||
*/
|
||||
|
||||
#include "io.h"
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <errno.h>
|
||||
#include <libgen.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <stdlib.h>
|
||||
#include <pwd.h>
|
||||
|
||||
FILE* ethash_fopen(char const* file_name, char const* mode)
|
||||
{
|
||||
return fopen(file_name, mode);
|
||||
}
|
||||
|
||||
char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count)
|
||||
{
|
||||
return strlen(dest) + count + 1 <= dest_size ? strncat(dest, src, count) : NULL;
|
||||
}
|
||||
|
||||
bool ethash_mkdir(char const* dirname)
|
||||
{
|
||||
int rc = mkdir(dirname, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
|
||||
return rc != -1 || errno == EEXIST;
|
||||
}
|
||||
|
||||
int ethash_fileno(FILE *f)
|
||||
{
|
||||
return fileno(f);
|
||||
}
|
||||
|
||||
char* ethash_io_create_filename(
|
||||
char const* dirname,
|
||||
char const* filename,
|
||||
size_t filename_length
|
||||
)
|
||||
{
|
||||
size_t dirlen = strlen(dirname);
|
||||
size_t dest_size = dirlen + filename_length + 1;
|
||||
if (dirname[dirlen] != '/') {
|
||||
dest_size += 1;
|
||||
}
|
||||
char* name = malloc(dest_size);
|
||||
if (!name) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
name[0] = '\0';
|
||||
ethash_strncat(name, dest_size, dirname, dirlen);
|
||||
if (dirname[dirlen] != '/') {
|
||||
ethash_strncat(name, dest_size, "/", 1);
|
||||
}
|
||||
ethash_strncat(name, dest_size, filename, filename_length);
|
||||
return name;
|
||||
}
|
||||
|
||||
bool ethash_file_size(FILE* f, size_t* ret_size)
|
||||
{
|
||||
struct stat st;
|
||||
int fd;
|
||||
if ((fd = fileno(f)) == -1 || fstat(fd, &st) != 0) {
|
||||
return false;
|
||||
}
|
||||
*ret_size = st.st_size;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ethash_get_default_dirname(char* strbuf, size_t buffsize)
|
||||
{
|
||||
static const char dir_suffix[] = ".ethash/";
|
||||
strbuf[0] = '\0';
|
||||
char* home_dir = getenv("HOME");
|
||||
if (!home_dir || strlen(home_dir) == 0)
|
||||
{
|
||||
struct passwd* pwd = getpwuid(getuid());
|
||||
if (pwd)
|
||||
home_dir = pwd->pw_dir;
|
||||
}
|
||||
|
||||
size_t len = strlen(home_dir);
|
||||
if (!ethash_strncat(strbuf, buffsize, home_dir, len)) {
|
||||
return false;
|
||||
}
|
||||
if (home_dir[len] != '/') {
|
||||
if (!ethash_strncat(strbuf, buffsize, "/", 1)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix));
|
||||
}
|
|
@ -1,100 +0,0 @@
|
|||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file io_win32.c
|
||||
* @author Lefteris Karapetsas <lefteris@ethdev.com>
|
||||
* @date 2015
|
||||
*/
|
||||
|
||||
#include "io.h"
|
||||
#include <direct.h>
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <shlobj.h>
|
||||
|
||||
FILE* ethash_fopen(char const* file_name, char const* mode)
|
||||
{
|
||||
FILE* f;
|
||||
return fopen_s(&f, file_name, mode) == 0 ? f : NULL;
|
||||
}
|
||||
|
||||
char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count)
|
||||
{
|
||||
return strncat_s(dest, dest_size, src, count) == 0 ? dest : NULL;
|
||||
}
|
||||
|
||||
bool ethash_mkdir(char const* dirname)
|
||||
{
|
||||
int rc = _mkdir(dirname);
|
||||
return rc != -1 || errno == EEXIST;
|
||||
}
|
||||
|
||||
int ethash_fileno(FILE* f)
|
||||
{
|
||||
return _fileno(f);
|
||||
}
|
||||
|
||||
char* ethash_io_create_filename(
|
||||
char const* dirname,
|
||||
char const* filename,
|
||||
size_t filename_length
|
||||
)
|
||||
{
|
||||
size_t dirlen = strlen(dirname);
|
||||
size_t dest_size = dirlen + filename_length + 1;
|
||||
if (dirname[dirlen] != '\\' || dirname[dirlen] != '/') {
|
||||
dest_size += 1;
|
||||
}
|
||||
char* name = malloc(dest_size);
|
||||
if (!name) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
name[0] = '\0';
|
||||
ethash_strncat(name, dest_size, dirname, dirlen);
|
||||
if (dirname[dirlen] != '\\' || dirname[dirlen] != '/') {
|
||||
ethash_strncat(name, dest_size, "\\", 1);
|
||||
}
|
||||
ethash_strncat(name, dest_size, filename, filename_length);
|
||||
return name;
|
||||
}
|
||||
|
||||
bool ethash_file_size(FILE* f, size_t* ret_size)
|
||||
{
|
||||
struct _stat st;
|
||||
int fd;
|
||||
if ((fd = _fileno(f)) == -1 || _fstat(fd, &st) != 0) {
|
||||
return false;
|
||||
}
|
||||
*ret_size = st.st_size;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ethash_get_default_dirname(char* strbuf, size_t buffsize)
|
||||
{
|
||||
static const char dir_suffix[] = "Ethash\\";
|
||||
strbuf[0] = '\0';
|
||||
if (!SUCCEEDED(SHGetFolderPathA(NULL, CSIDL_LOCAL_APPDATA, NULL, 0, (CHAR*)strbuf))) {
|
||||
return false;
|
||||
}
|
||||
if (!ethash_strncat(strbuf, buffsize, "\\", 1)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix));
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file mmap.h
|
||||
* @author Lefteris Karapetsas <lefteris@ethdev.com>
|
||||
* @date 2015
|
||||
*/
|
||||
#pragma once
|
||||
#if defined(__MINGW32__) || defined(_WIN32)
|
||||
#include <sys/types.h>
|
||||
|
||||
#define PROT_READ 0x1
|
||||
#define PROT_WRITE 0x2
|
||||
/* This flag is only available in WinXP+ */
|
||||
#ifdef FILE_MAP_EXECUTE
|
||||
#define PROT_EXEC 0x4
|
||||
#else
|
||||
#define PROT_EXEC 0x0
|
||||
#define FILE_MAP_EXECUTE 0
|
||||
#endif
|
||||
|
||||
#define MAP_SHARED 0x01
|
||||
#define MAP_PRIVATE 0x02
|
||||
#define MAP_ANONYMOUS 0x20
|
||||
#define MAP_ANON MAP_ANONYMOUS
|
||||
#define MAP_FAILED ((void *) -1)
|
||||
|
||||
void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset);
|
||||
void munmap(void* addr, size_t length);
|
||||
#else // posix, yay! ^_^
|
||||
#include <sys/mman.h>
|
||||
#endif
|
||||
|
||||
|
|
@ -1,84 +0,0 @@
|
|||
/* mmap() replacement for Windows
|
||||
*
|
||||
* Author: Mike Frysinger <vapier@gentoo.org>
|
||||
* Placed into the public domain
|
||||
*/
|
||||
|
||||
/* References:
|
||||
* CreateFileMapping: http://msdn.microsoft.com/en-us/library/aa366537(VS.85).aspx
|
||||
* CloseHandle: http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx
|
||||
* MapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366761(VS.85).aspx
|
||||
* UnmapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366882(VS.85).aspx
|
||||
*/
|
||||
|
||||
#include <io.h>
|
||||
#include <windows.h>
|
||||
#include "mmap.h"
|
||||
|
||||
#ifdef __USE_FILE_OFFSET64
|
||||
# define DWORD_HI(x) (x >> 32)
|
||||
# define DWORD_LO(x) ((x) & 0xffffffff)
|
||||
#else
|
||||
# define DWORD_HI(x) (0)
|
||||
# define DWORD_LO(x) (x)
|
||||
#endif
|
||||
|
||||
void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset)
|
||||
{
|
||||
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
|
||||
return MAP_FAILED;
|
||||
if (fd == -1) {
|
||||
if (!(flags & MAP_ANON) || offset)
|
||||
return MAP_FAILED;
|
||||
} else if (flags & MAP_ANON)
|
||||
return MAP_FAILED;
|
||||
|
||||
DWORD flProtect;
|
||||
if (prot & PROT_WRITE) {
|
||||
if (prot & PROT_EXEC)
|
||||
flProtect = PAGE_EXECUTE_READWRITE;
|
||||
else
|
||||
flProtect = PAGE_READWRITE;
|
||||
} else if (prot & PROT_EXEC) {
|
||||
if (prot & PROT_READ)
|
||||
flProtect = PAGE_EXECUTE_READ;
|
||||
else if (prot & PROT_EXEC)
|
||||
flProtect = PAGE_EXECUTE;
|
||||
} else
|
||||
flProtect = PAGE_READONLY;
|
||||
|
||||
off_t end = length + offset;
|
||||
HANDLE mmap_fd, h;
|
||||
if (fd == -1)
|
||||
mmap_fd = INVALID_HANDLE_VALUE;
|
||||
else
|
||||
mmap_fd = (HANDLE)_get_osfhandle(fd);
|
||||
h = CreateFileMapping(mmap_fd, NULL, flProtect, DWORD_HI(end), DWORD_LO(end), NULL);
|
||||
if (h == NULL)
|
||||
return MAP_FAILED;
|
||||
|
||||
DWORD dwDesiredAccess;
|
||||
if (prot & PROT_WRITE)
|
||||
dwDesiredAccess = FILE_MAP_WRITE;
|
||||
else
|
||||
dwDesiredAccess = FILE_MAP_READ;
|
||||
if (prot & PROT_EXEC)
|
||||
dwDesiredAccess |= FILE_MAP_EXECUTE;
|
||||
if (flags & MAP_PRIVATE)
|
||||
dwDesiredAccess |= FILE_MAP_COPY;
|
||||
void *ret = MapViewOfFile(h, dwDesiredAccess, DWORD_HI(offset), DWORD_LO(offset), length);
|
||||
if (ret == NULL) {
|
||||
ret = MAP_FAILED;
|
||||
}
|
||||
// since we are handling the file ourselves with fd, close the Windows Handle here
|
||||
CloseHandle(h);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void munmap(void* addr, size_t length)
|
||||
{
|
||||
UnmapViewOfFile(addr);
|
||||
}
|
||||
|
||||
#undef DWORD_HI
|
||||
#undef DWORD_LO
|
|
@ -1,151 +0,0 @@
|
|||
/** libkeccak-tiny
|
||||
*
|
||||
* A single-file implementation of SHA-3 and SHAKE.
|
||||
*
|
||||
* Implementor: David Leon Gil
|
||||
* License: CC0, attribution kindly requested. Blame taken too,
|
||||
* but not liability.
|
||||
*/
|
||||
#include "sha3.h"
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
/******** The Keccak-f[1600] permutation ********/
|
||||
|
||||
/*** Constants. ***/
|
||||
static const uint8_t rho[24] = \
|
||||
{ 1, 3, 6, 10, 15, 21,
|
||||
28, 36, 45, 55, 2, 14,
|
||||
27, 41, 56, 8, 25, 43,
|
||||
62, 18, 39, 61, 20, 44};
|
||||
static const uint8_t pi[24] = \
|
||||
{10, 7, 11, 17, 18, 3,
|
||||
5, 16, 8, 21, 24, 4,
|
||||
15, 23, 19, 13, 12, 2,
|
||||
20, 14, 22, 9, 6, 1};
|
||||
static const uint64_t RC[24] = \
|
||||
{1ULL, 0x8082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
|
||||
0x808bULL, 0x80000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
|
||||
0x8aULL, 0x88ULL, 0x80008009ULL, 0x8000000aULL,
|
||||
0x8000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
|
||||
0x8000000000008002ULL, 0x8000000000000080ULL, 0x800aULL, 0x800000008000000aULL,
|
||||
0x8000000080008081ULL, 0x8000000000008080ULL, 0x80000001ULL, 0x8000000080008008ULL};
|
||||
|
||||
/*** Helper macros to unroll the permutation. ***/
|
||||
#define rol(x, s) (((x) << s) | ((x) >> (64 - s)))
|
||||
#define REPEAT6(e) e e e e e e
|
||||
#define REPEAT24(e) REPEAT6(e e e e)
|
||||
#define REPEAT5(e) e e e e e
|
||||
#define FOR5(v, s, e) \
|
||||
v = 0; \
|
||||
REPEAT5(e; v += s;)
|
||||
|
||||
/*** Keccak-f[1600] ***/
|
||||
static inline void keccakf(void* state) {
|
||||
uint64_t* a = (uint64_t*)state;
|
||||
uint64_t b[5] = {0};
|
||||
uint64_t t = 0;
|
||||
uint8_t x, y;
|
||||
|
||||
for (int i = 0; i < 24; i++) {
|
||||
// Theta
|
||||
FOR5(x, 1,
|
||||
b[x] = 0;
|
||||
FOR5(y, 5,
|
||||
b[x] ^= a[x + y]; ))
|
||||
FOR5(x, 1,
|
||||
FOR5(y, 5,
|
||||
a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); ))
|
||||
// Rho and pi
|
||||
t = a[1];
|
||||
x = 0;
|
||||
REPEAT24(b[0] = a[pi[x]];
|
||||
a[pi[x]] = rol(t, rho[x]);
|
||||
t = b[0];
|
||||
x++; )
|
||||
// Chi
|
||||
FOR5(y,
|
||||
5,
|
||||
FOR5(x, 1,
|
||||
b[x] = a[y + x];)
|
||||
FOR5(x, 1,
|
||||
a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); ))
|
||||
// Iota
|
||||
a[0] ^= RC[i];
|
||||
}
|
||||
}
|
||||
|
||||
/******** The FIPS202-defined functions. ********/
|
||||
|
||||
/*** Some helper macros. ***/
|
||||
|
||||
#define _(S) do { S } while (0)
|
||||
#define FOR(i, ST, L, S) \
|
||||
_(for (size_t i = 0; i < L; i += ST) { S; })
|
||||
#define mkapply_ds(NAME, S) \
|
||||
static inline void NAME(uint8_t* dst, \
|
||||
const uint8_t* src, \
|
||||
size_t len) { \
|
||||
FOR(i, 1, len, S); \
|
||||
}
|
||||
#define mkapply_sd(NAME, S) \
|
||||
static inline void NAME(const uint8_t* src, \
|
||||
uint8_t* dst, \
|
||||
size_t len) { \
|
||||
FOR(i, 1, len, S); \
|
||||
}
|
||||
|
||||
mkapply_ds(xorin, dst[i] ^= src[i]) // xorin
|
||||
mkapply_sd(setout, dst[i] = src[i]) // setout
|
||||
|
||||
#define P keccakf
|
||||
#define Plen 200
|
||||
|
||||
// Fold P*F over the full blocks of an input.
|
||||
#define foldP(I, L, F) \
|
||||
while (L >= rate) { \
|
||||
F(a, I, rate); \
|
||||
P(a); \
|
||||
I += rate; \
|
||||
L -= rate; \
|
||||
}
|
||||
|
||||
/** The sponge-based hash construction. **/
|
||||
static inline int hash(uint8_t* out, size_t outlen,
|
||||
const uint8_t* in, size_t inlen,
|
||||
size_t rate, uint8_t delim) {
|
||||
if ((out == NULL) || ((in == NULL) && inlen != 0) || (rate >= Plen)) {
|
||||
return -1;
|
||||
}
|
||||
uint8_t a[Plen] = {0};
|
||||
// Absorb input.
|
||||
foldP(in, inlen, xorin);
|
||||
// Xor in the DS and pad frame.
|
||||
a[inlen] ^= delim;
|
||||
a[rate - 1] ^= 0x80;
|
||||
// Xor in the last block.
|
||||
xorin(a, in, inlen);
|
||||
// Apply P
|
||||
P(a);
|
||||
// Squeeze output.
|
||||
foldP(out, outlen, setout);
|
||||
setout(a, out, outlen);
|
||||
memset(a, 0, 200);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define defsha3(bits) \
|
||||
int sha3_##bits(uint8_t* out, size_t outlen, \
|
||||
const uint8_t* in, size_t inlen) { \
|
||||
if (outlen > (bits/8)) { \
|
||||
return -1; \
|
||||
} \
|
||||
return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x01); \
|
||||
}
|
||||
|
||||
/*** FIPS202 SHA3 FOFs ***/
|
||||
defsha3(256)
|
||||
defsha3(512)
|
|
@ -1,31 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "compiler.h"
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
struct ethash_h256;
|
||||
|
||||
#define decsha3(bits) \
|
||||
int sha3_##bits(uint8_t*, size_t, uint8_t const*, size_t);
|
||||
|
||||
decsha3(256)
|
||||
decsha3(512)
|
||||
|
||||
static inline void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t const size)
|
||||
{
|
||||
sha3_256((uint8_t*)ret, 32, data, size);
|
||||
}
|
||||
|
||||
static inline void SHA3_512(uint8_t* ret, uint8_t const* data, size_t const size)
|
||||
{
|
||||
sha3_512(ret, 64, data, size);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
|
@ -1,37 +0,0 @@
|
|||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
/** @file sha3.cpp
|
||||
* @author Tim Hughes <tim@twistedfury.com>
|
||||
* @date 2015
|
||||
*/
|
||||
#include <stdint.h>
|
||||
#include <cryptopp/sha3.h>
|
||||
|
||||
extern "C" {
|
||||
struct ethash_h256;
|
||||
typedef struct ethash_h256 ethash_h256_t;
|
||||
void SHA3_256(ethash_h256_t const* ret, uint8_t const* data, size_t size)
|
||||
{
|
||||
CryptoPP::SHA3_256().CalculateDigest((uint8_t*)ret, data, size);
|
||||
}
|
||||
|
||||
void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size)
|
||||
{
|
||||
CryptoPP::SHA3_512().CalculateDigest(ret, data, size);
|
||||
}
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "compiler.h"
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct ethash_h256;
|
||||
|
||||
void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t size);
|
||||
void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
|
@ -1,47 +0,0 @@
|
|||
/*
|
||||
This file is part of ethash.
|
||||
|
||||
ethash is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
ethash is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with ethash. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file util.h
|
||||
* @author Tim Hughes <tim@twistedfury.com>
|
||||
* @date 2015
|
||||
*/
|
||||
#pragma once
|
||||
#include <stdint.h>
|
||||
#include "compiler.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
void debugf(char const* str, ...);
|
||||
#else
|
||||
#define debugf printf
|
||||
#endif
|
||||
|
||||
static inline uint32_t min_u32(uint32_t a, uint32_t b)
|
||||
{
|
||||
return a < b ? a : b;
|
||||
}
|
||||
|
||||
static inline uint32_t clamp_u32(uint32_t x, uint32_t min_, uint32_t max_)
|
||||
{
|
||||
return x < min_ ? min_ : (x > max_ ? max_ : x);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
|
@ -1,38 +0,0 @@
|
|||
/*
|
||||
This file is part of cpp-ethereum.
|
||||
|
||||
cpp-ethereum is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
cpp-ethereum is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file util.c
|
||||
* @author Tim Hughes <tim@twistedfury.com>
|
||||
* @date 2015
|
||||
*/
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include "util.h"
|
||||
|
||||
|
||||
// forward declare without all of Windows.h
|
||||
__declspec(dllimport) void __stdcall OutputDebugStringA(char const* lpOutputString);
|
||||
|
||||
void debugf(char const* str, ...)
|
||||
{
|
||||
va_list args;
|
||||
va_start(args, str);
|
||||
|
||||
char buf[1<<16];
|
||||
_vsnprintf_s(buf, sizeof(buf), sizeof(buf), str, args);
|
||||
buf[sizeof(buf)-1] = '\0';
|
||||
OutputDebugStringA(buf);
|
||||
}
|
|
@ -1,267 +0,0 @@
|
|||
#include <Python.h>
|
||||
#include <alloca.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
#include "../libethash/ethash.h"
|
||||
#include "../libethash/internal.h"
|
||||
|
||||
#if PY_MAJOR_VERSION >= 3
|
||||
#define PY_STRING_FORMAT "y#"
|
||||
#define PY_CONST_STRING_FORMAT "y"
|
||||
#else
|
||||
#define PY_STRING_FORMAT "s#"
|
||||
#define PY_CONST_STRING_FORMAT "s"
|
||||
#endif
|
||||
|
||||
#define MIX_WORDS (ETHASH_MIX_BYTES/4)
|
||||
|
||||
static PyObject *
|
||||
mkcache_bytes(PyObject *self, PyObject *args) {
|
||||
unsigned long block_number;
|
||||
unsigned long cache_size;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "k", &block_number))
|
||||
return 0;
|
||||
|
||||
ethash_light_t L = ethash_light_new(block_number);
|
||||
PyObject * val = Py_BuildValue(PY_STRING_FORMAT, L->cache, L->cache_size);
|
||||
free(L->cache);
|
||||
return val;
|
||||
}
|
||||
|
||||
/*
|
||||
static PyObject *
|
||||
calc_dataset_bytes(PyObject *self, PyObject *args) {
|
||||
char *cache_bytes;
|
||||
unsigned long full_size;
|
||||
int cache_size;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "k" PY_STRING_FORMAT, &full_size, &cache_bytes, &cache_size))
|
||||
return 0;
|
||||
|
||||
if (full_size % MIX_WORDS != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %lu)", MIX_WORDS, full_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (cache_size % ETHASH_HASH_BYTES != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", ETHASH_HASH_BYTES, cache_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ethash_params params;
|
||||
params.cache_size = (size_t) cache_size;
|
||||
params.full_size = (size_t) full_size;
|
||||
ethash_cache cache;
|
||||
cache.mem = (void *) cache_bytes;
|
||||
void *mem = malloc(params.full_size);
|
||||
ethash_compute_full_data(mem, ¶ms, &cache);
|
||||
PyObject * val = Py_BuildValue(PY_STRING_FORMAT, (char *) mem, full_size);
|
||||
free(mem);
|
||||
return val;
|
||||
}*/
|
||||
|
||||
// hashimoto_light(full_size, cache, header, nonce)
|
||||
static PyObject *
|
||||
hashimoto_light(PyObject *self, PyObject *args) {
|
||||
char *cache_bytes;
|
||||
char *header;
|
||||
unsigned long block_number;
|
||||
unsigned long long nonce;
|
||||
int cache_size, header_size;
|
||||
if (!PyArg_ParseTuple(args, "k" PY_STRING_FORMAT PY_STRING_FORMAT "K", &block_number, &cache_bytes, &cache_size, &header, &header_size, &nonce))
|
||||
return 0;
|
||||
if (header_size != 32) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Seed must be 32 bytes long (was %i)", header_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
struct ethash_light *s;
|
||||
s = calloc(sizeof(*s), 1);
|
||||
s->cache = cache_bytes;
|
||||
s->cache_size = cache_size;
|
||||
s->block_number = block_number;
|
||||
struct ethash_h256 *h;
|
||||
h = calloc(sizeof(*h), 1);
|
||||
for (int i = 0; i < 32; i++) h->b[i] = header[i];
|
||||
struct ethash_return_value out = ethash_light_compute(s, *h, nonce);
|
||||
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "," PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}",
|
||||
"mix digest", &out.mix_hash, 32,
|
||||
"result", &out.result, 32);
|
||||
}
|
||||
/*
|
||||
// hashimoto_full(dataset, header, nonce)
|
||||
static PyObject *
|
||||
hashimoto_full(PyObject *self, PyObject *args) {
|
||||
char *full_bytes;
|
||||
char *header;
|
||||
unsigned long long nonce;
|
||||
int full_size, header_size;
|
||||
|
||||
if (!PyArg_ParseTuple(args, PY_STRING_FORMAT PY_STRING_FORMAT "K", &full_bytes, &full_size, &header, &header_size, &nonce))
|
||||
return 0;
|
||||
|
||||
if (full_size % MIX_WORDS != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %i)", MIX_WORDS, full_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (header_size != 32) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Header must be 32 bytes long (was %i)", header_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
ethash_return_value out;
|
||||
ethash_params params;
|
||||
params.full_size = (size_t) full_size;
|
||||
ethash_full(&out, (void *) full_bytes, ¶ms, (ethash_h256_t *) header, nonce);
|
||||
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}",
|
||||
"mix digest", &out.mix_hash, 32,
|
||||
"result", &out.result, 32);
|
||||
}
|
||||
|
||||
// mine(dataset_bytes, header, difficulty_bytes)
|
||||
static PyObject *
|
||||
mine(PyObject *self, PyObject *args) {
|
||||
char *full_bytes;
|
||||
char *header;
|
||||
char *difficulty;
|
||||
srand(time(0));
|
||||
uint64_t nonce = ((uint64_t) rand()) << 32 | rand();
|
||||
int full_size, header_size, difficulty_size;
|
||||
|
||||
if (!PyArg_ParseTuple(args, PY_STRING_FORMAT PY_STRING_FORMAT PY_STRING_FORMAT, &full_bytes, &full_size, &header, &header_size, &difficulty, &difficulty_size))
|
||||
return 0;
|
||||
|
||||
if (full_size % MIX_WORDS != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %i)", MIX_WORDS, full_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (header_size != 32) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Header must be 32 bytes long (was %i)", header_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (difficulty_size != 32) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Difficulty must be an array of 32 bytes (only had %i)", difficulty_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ethash_return_value out;
|
||||
ethash_params params;
|
||||
params.full_size = (size_t) full_size;
|
||||
|
||||
// TODO: Multi threading?
|
||||
do {
|
||||
ethash_full(&out, (void *) full_bytes, ¶ms, (const ethash_h256_t *) header, nonce++);
|
||||
// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
|
||||
} while (!ethash_check_difficulty(&out.result, (const ethash_h256_t *) difficulty));
|
||||
|
||||
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":K}",
|
||||
"mix digest", &out.mix_hash, 32,
|
||||
"result", &out.result, 32,
|
||||
"nonce", nonce);
|
||||
}
|
||||
*/
|
||||
|
||||
//get_seedhash(block_number)
|
||||
static PyObject *
|
||||
get_seedhash(PyObject *self, PyObject *args) {
|
||||
unsigned long block_number;
|
||||
if (!PyArg_ParseTuple(args, "k", &block_number))
|
||||
return 0;
|
||||
if (block_number >= ETHASH_EPOCH_LENGTH * 2048) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Block number must be less than %i (was %lu)", ETHASH_EPOCH_LENGTH * 2048, block_number);
|
||||
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
ethash_h256_t seedhash = ethash_get_seedhash(block_number);
|
||||
return Py_BuildValue(PY_STRING_FORMAT, (char *) &seedhash, 32);
|
||||
}
|
||||
|
||||
static PyMethodDef PyethashMethods[] =
|
||||
{
|
||||
{"get_seedhash", get_seedhash, METH_VARARGS,
|
||||
"get_seedhash(block_number)\n\n"
|
||||
"Gets the seedhash for a block."},
|
||||
{"mkcache_bytes", mkcache_bytes, METH_VARARGS,
|
||||
"mkcache_bytes(block_number)\n\n"
|
||||
"Makes a byte array for the cache for given block number\n"},
|
||||
/*{"calc_dataset_bytes", calc_dataset_bytes, METH_VARARGS,
|
||||
"calc_dataset_bytes(full_size, cache_bytes)\n\n"
|
||||
"Makes a byte array for the dataset for a given size given cache bytes"},*/
|
||||
{"hashimoto_light", hashimoto_light, METH_VARARGS,
|
||||
"hashimoto_light(block_number, cache_bytes, header, nonce)\n\n"
|
||||
"Runs the hashimoto hashing function just using cache bytes. Takes an int (full_size), byte array (cache_bytes), another byte array (header), and an int (nonce). Returns an object containing the mix digest, and hash result."},
|
||||
/*{"hashimoto_full", hashimoto_full, METH_VARARGS,
|
||||
"hashimoto_full(dataset_bytes, header, nonce)\n\n"
|
||||
"Runs the hashimoto hashing function using the dataset bytes. Useful for testing. Returns an object containing the mix digest (byte array), and hash result (another byte array)."},
|
||||
{"mine", mine, METH_VARARGS,
|
||||
"mine(dataset_bytes, header, difficulty_bytes)\n\n"
|
||||
"Mine for an adequate header. Returns an object containing the mix digest (byte array), hash result (another byte array) and nonce (an int)."},*/
|
||||
{NULL, NULL, 0, NULL}
|
||||
};
|
||||
|
||||
#if PY_MAJOR_VERSION >= 3
|
||||
static struct PyModuleDef PyethashModule = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
"pyethash",
|
||||
"...",
|
||||
-1,
|
||||
PyethashMethods
|
||||
};
|
||||
|
||||
PyMODINIT_FUNC PyInit_pyethash(void) {
|
||||
PyObject *module = PyModule_Create(&PyethashModule);
|
||||
// Following Spec: https://github.com/ethereum/wiki/wiki/Ethash#definitions
|
||||
PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH);
|
||||
PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES);
|
||||
PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES);
|
||||
PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS);
|
||||
PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS);
|
||||
PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES);
|
||||
return module;
|
||||
}
|
||||
#else
|
||||
PyMODINIT_FUNC
|
||||
initpyethash(void) {
|
||||
PyObject *module = Py_InitModule("pyethash", PyethashMethods);
|
||||
// Following Spec: https://github.com/ethereum/wiki/wiki/Ethash#definitions
|
||||
PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH);
|
||||
PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES);
|
||||
PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES);
|
||||
PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS);
|
||||
PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS);
|
||||
PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES);
|
||||
}
|
||||
#endif
|
|
@ -17,6 +17,7 @@
|
|||
package abi
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -129,16 +130,15 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
|
|||
var size int
|
||||
var offset int
|
||||
if t.Type.IsSlice {
|
||||
|
||||
// get the offset which determines the start of this array ...
|
||||
offset = int(common.BytesToBig(output[index : index+32]).Uint64())
|
||||
offset = int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
|
||||
}
|
||||
|
||||
slice = output[offset:]
|
||||
// ... starting with the size of the array in elements ...
|
||||
size = int(common.BytesToBig(slice[:32]).Uint64())
|
||||
size = int(binary.BigEndian.Uint64(slice[24:32]))
|
||||
slice = slice[32:]
|
||||
// ... and make sure that we've at the very least the amount of bytes
|
||||
// available in the buffer.
|
||||
|
@ -147,7 +147,7 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
|
|||
}
|
||||
|
||||
// reslice to match the required size
|
||||
slice = slice[:(size * 32)]
|
||||
slice = slice[:size*32]
|
||||
} else if t.Type.IsArray {
|
||||
//get the number of elements in the array
|
||||
size = t.Type.SliceSize
|
||||
|
@ -165,33 +165,12 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
|
|||
inter interface{} // interface type
|
||||
returnOutput = slice[i*32 : i*32+32] // the return output
|
||||
)
|
||||
|
||||
// set inter to the correct type (cast)
|
||||
switch elem.T {
|
||||
case IntTy, UintTy:
|
||||
bigNum := common.BytesToBig(returnOutput)
|
||||
switch t.Type.Kind {
|
||||
case reflect.Uint8:
|
||||
inter = uint8(bigNum.Uint64())
|
||||
case reflect.Uint16:
|
||||
inter = uint16(bigNum.Uint64())
|
||||
case reflect.Uint32:
|
||||
inter = uint32(bigNum.Uint64())
|
||||
case reflect.Uint64:
|
||||
inter = bigNum.Uint64()
|
||||
case reflect.Int8:
|
||||
inter = int8(bigNum.Int64())
|
||||
case reflect.Int16:
|
||||
inter = int16(bigNum.Int64())
|
||||
case reflect.Int32:
|
||||
inter = int32(bigNum.Int64())
|
||||
case reflect.Int64:
|
||||
inter = bigNum.Int64()
|
||||
default:
|
||||
inter = common.BytesToBig(returnOutput)
|
||||
}
|
||||
inter = readInteger(t.Type.Kind, returnOutput)
|
||||
case BoolTy:
|
||||
inter = common.BytesToBig(returnOutput).Uint64() > 0
|
||||
inter = !allZero(returnOutput)
|
||||
case AddressTy:
|
||||
inter = common.BytesToAddress(returnOutput)
|
||||
case HashTy:
|
||||
|
@ -207,6 +186,38 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
|
|||
return refSlice.Interface(), nil
|
||||
}
|
||||
|
||||
func readInteger(kind reflect.Kind, b []byte) interface{} {
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
return uint8(b[len(b)-1])
|
||||
case reflect.Uint16:
|
||||
return binary.BigEndian.Uint16(b[len(b)-2:])
|
||||
case reflect.Uint32:
|
||||
return binary.BigEndian.Uint32(b[len(b)-4:])
|
||||
case reflect.Uint64:
|
||||
return binary.BigEndian.Uint64(b[len(b)-8:])
|
||||
case reflect.Int8:
|
||||
return int8(b[len(b)-1])
|
||||
case reflect.Int16:
|
||||
return int16(binary.BigEndian.Uint16(b[len(b)-2:]))
|
||||
case reflect.Int32:
|
||||
return int32(binary.BigEndian.Uint32(b[len(b)-4:]))
|
||||
case reflect.Int64:
|
||||
return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
|
||||
default:
|
||||
return new(big.Int).SetBytes(b)
|
||||
}
|
||||
}
|
||||
|
||||
func allZero(b []byte) bool {
|
||||
for _, byte := range b {
|
||||
if byte != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// toGoType parses the input and casts it to the proper type defined by the ABI
|
||||
// argument in T.
|
||||
func toGoType(i int, t Argument, output []byte) (interface{}, error) {
|
||||
|
@ -226,12 +237,12 @@ func toGoType(i int, t Argument, output []byte) (interface{}, error) {
|
|||
switch t.Type.T {
|
||||
case StringTy, BytesTy: // variable arrays are written at the end of the return bytes
|
||||
// parse offset from which we should start reading
|
||||
offset := int(common.BytesToBig(output[index : index+32]).Uint64())
|
||||
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
||||
if offset+32 > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32)
|
||||
}
|
||||
// parse the size up until we should be reading
|
||||
size := int(common.BytesToBig(output[offset : offset+32]).Uint64())
|
||||
size := int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
|
||||
if offset+32+size > len(output) {
|
||||
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+size)
|
||||
}
|
||||
|
@ -245,32 +256,9 @@ func toGoType(i int, t Argument, output []byte) (interface{}, error) {
|
|||
// convert the bytes to whatever is specified by the ABI.
|
||||
switch t.Type.T {
|
||||
case IntTy, UintTy:
|
||||
bigNum := common.BytesToBig(returnOutput)
|
||||
|
||||
// If the type is a integer convert to the integer type
|
||||
// specified by the ABI.
|
||||
switch t.Type.Kind {
|
||||
case reflect.Uint8:
|
||||
return uint8(bigNum.Uint64()), nil
|
||||
case reflect.Uint16:
|
||||
return uint16(bigNum.Uint64()), nil
|
||||
case reflect.Uint32:
|
||||
return uint32(bigNum.Uint64()), nil
|
||||
case reflect.Uint64:
|
||||
return uint64(bigNum.Uint64()), nil
|
||||
case reflect.Int8:
|
||||
return int8(bigNum.Int64()), nil
|
||||
case reflect.Int16:
|
||||
return int16(bigNum.Int64()), nil
|
||||
case reflect.Int32:
|
||||
return int32(bigNum.Int64()), nil
|
||||
case reflect.Int64:
|
||||
return int64(bigNum.Int64()), nil
|
||||
case reflect.Ptr:
|
||||
return bigNum, nil
|
||||
}
|
||||
return readInteger(t.Type.Kind, returnOutput), nil
|
||||
case BoolTy:
|
||||
return common.BytesToBig(returnOutput).Uint64() > 0, nil
|
||||
return !allZero(returnOutput), nil
|
||||
case AddressTy:
|
||||
return common.BytesToAddress(returnOutput), nil
|
||||
case HashTy:
|
||||
|
|
|
@ -17,13 +17,13 @@
|
|||
package bind
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
30
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go
generated
vendored
30
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go
generated
vendored
|
@ -17,6 +17,7 @@
|
|||
package backends
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
@ -25,6 +26,8 @@ import (
|
|||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
|
@ -32,12 +35,8 @@ import (
|
|||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Default chain configuration which sets homestead phase at block 0 (i.e. no frontier)
|
||||
var chainConfig = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(0), EIP150Block: new(big.Int), EIP158Block: new(big.Int)}
|
||||
|
||||
// This nil assignment ensures compile time that SimulatedBackend implements bind.ContractBackend.
|
||||
var _ bind.ContractBackend = (*SimulatedBackend)(nil)
|
||||
|
||||
|
@ -58,11 +57,12 @@ type SimulatedBackend struct {
|
|||
|
||||
// NewSimulatedBackend creates a new binding backend using a simulated blockchain
|
||||
// for testing purposes.
|
||||
func NewSimulatedBackend(accounts ...core.GenesisAccount) *SimulatedBackend {
|
||||
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
|
||||
database, _ := ethdb.NewMemDatabase()
|
||||
core.WriteGenesisBlockForTesting(database, accounts...)
|
||||
blockchain, _ := core.NewBlockChain(database, chainConfig, new(core.FakePow), new(event.TypeMux), vm.Config{})
|
||||
backend := &SimulatedBackend{database: database, blockchain: blockchain}
|
||||
genesis := core.Genesis{Config: params.AllProtocolChanges, Alloc: alloc}
|
||||
genesis.MustCommit(database)
|
||||
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||
backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config}
|
||||
backend.rollback()
|
||||
return backend
|
||||
}
|
||||
|
@ -88,7 +88,7 @@ func (b *SimulatedBackend) Rollback() {
|
|||
}
|
||||
|
||||
func (b *SimulatedBackend) rollback() {
|
||||
blocks, _ := core.GenerateChain(chainConfig, b.blockchain.CurrentBlock(), b.database, 1, func(int, *core.BlockGen) {})
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(int, *core.BlockGen) {})
|
||||
b.pendingBlock = blocks[0]
|
||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.database)
|
||||
}
|
||||
|
@ -236,7 +236,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
|
|||
if call.GasPrice == nil {
|
||||
call.GasPrice = big.NewInt(1)
|
||||
}
|
||||
if call.Gas == nil || call.Gas.BitLen() == 0 {
|
||||
if call.Gas == nil || call.Gas.Sign() == 0 {
|
||||
call.Gas = big.NewInt(50000000)
|
||||
}
|
||||
if call.Value == nil {
|
||||
|
@ -244,15 +244,15 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
|
|||
}
|
||||
// Set infinite balance to the fake caller account.
|
||||
from := statedb.GetOrNewStateObject(call.From)
|
||||
from.SetBalance(common.MaxBig)
|
||||
from.SetBalance(math.MaxBig256)
|
||||
// Execute the call.
|
||||
msg := callmsg{call}
|
||||
|
||||
evmContext := core.NewEVMContext(msg, block.Header(), b.blockchain)
|
||||
evmContext := core.NewEVMContext(msg, block.Header(), b.blockchain, nil)
|
||||
// Create a new environment which holds all relevant information
|
||||
// about the transaction and calling mechanisms.
|
||||
vmenv := vm.NewEVM(evmContext, statedb, chainConfig, vm.Config{})
|
||||
gaspool := new(core.GasPool).AddGas(common.MaxBig)
|
||||
vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
|
||||
gaspool := new(core.GasPool).AddGas(math.MaxBig256)
|
||||
ret, gasUsed, _, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
|
||||
return ret, gasUsed, err
|
||||
}
|
||||
|
@ -272,7 +272,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
|
|||
panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
|
||||
}
|
||||
|
||||
blocks, _ := core.GenerateChain(chainConfig, b.blockchain.CurrentBlock(), b.database, 1, func(number int, block *core.BlockGen) {
|
||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(number int, block *core.BlockGen) {
|
||||
for _, tx := range b.pendingBlock.Transactions() {
|
||||
block.AddTx(tx)
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
package bind
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
@ -26,7 +27,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// SignerFn is a signer function callback when a contract requires a method to
|
||||
|
@ -35,7 +35,8 @@ type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Tra
|
|||
|
||||
// CallOpts is the collection of options to fine tune a contract call request.
|
||||
type CallOpts struct {
|
||||
Pending bool // Whether to operate on the pending state or the last known one
|
||||
Pending bool // Whether to operate on the pending state or the last known one
|
||||
From common.Address // Optional the sender address, otherwise the first account is used
|
||||
|
||||
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
|
||||
}
|
||||
|
@ -108,7 +109,7 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
|
|||
return err
|
||||
}
|
||||
var (
|
||||
msg = ethereum.CallMsg{To: &c.address, Data: input}
|
||||
msg = ethereum.CallMsg{From: opts.From, To: &c.address, Data: input}
|
||||
ctx = ensureContext(opts.Context)
|
||||
code []byte
|
||||
output []byte
|
||||
|
|
|
@ -17,31 +17,31 @@
|
|||
package bind
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"golang.org/x/net/context"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// WaitMined waits for tx to be mined on the blockchain.
|
||||
// It stops waiting when the context is canceled.
|
||||
func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*types.Receipt, error) {
|
||||
queryTicker := time.NewTicker(1 * time.Second)
|
||||
queryTicker := time.NewTicker(time.Second)
|
||||
defer queryTicker.Stop()
|
||||
loghash := tx.Hash().Hex()[:8]
|
||||
|
||||
logger := log.New("hash", tx.Hash())
|
||||
for {
|
||||
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
|
||||
if receipt != nil {
|
||||
return receipt, nil
|
||||
}
|
||||
if err != nil {
|
||||
glog.V(logger.Detail).Infof("tx %x error: %v", loghash, err)
|
||||
logger.Trace("Receipt retrieval failed", "err", err)
|
||||
} else {
|
||||
glog.V(logger.Detail).Infof("tx %x not yet mined...", loghash)
|
||||
logger.Trace("Transaction not yet mined")
|
||||
}
|
||||
// Wait for the next round.
|
||||
select {
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -58,7 +59,7 @@ var (
|
|||
|
||||
// U256 converts a big Int into a 256bit EVM number.
|
||||
func U256(n *big.Int) []byte {
|
||||
return common.LeftPadBytes(common.U256(n).Bytes(), 32)
|
||||
return math.PaddedBigBytes(math.U256(n), 32)
|
||||
}
|
||||
|
||||
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
)
|
||||
|
||||
// packBytesSlice packs the given bytes as [L, V] as the canonical representation
|
||||
|
@ -45,9 +46,9 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
|
|||
return common.LeftPadBytes(reflectValue.Bytes(), 32)
|
||||
case BoolTy:
|
||||
if reflectValue.Bool() {
|
||||
return common.LeftPadBytes(common.Big1.Bytes(), 32)
|
||||
return math.PaddedBigBytes(common.Big1, 32)
|
||||
} else {
|
||||
return common.LeftPadBytes(common.Big0.Bytes(), 32)
|
||||
return math.PaddedBigBytes(common.Big0, 32)
|
||||
}
|
||||
case BytesTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2016 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
|
@ -30,8 +30,7 @@ import (
|
|||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// Minimum amount of time between cache reloads. This limit applies if the platform does
|
||||
|
@ -210,8 +209,8 @@ func (ac *accountCache) close() {
|
|||
// Callers must hold ac.mu.
|
||||
func (ac *accountCache) reload() {
|
||||
accounts, err := ac.scan()
|
||||
if err != nil && glog.V(logger.Debug) {
|
||||
glog.Errorf("can't load keys: %v", err)
|
||||
if err != nil {
|
||||
log.Debug("Failed to reload keystore contents", "err", err)
|
||||
}
|
||||
ac.all = accounts
|
||||
sort.Sort(ac.all)
|
||||
|
@ -225,7 +224,7 @@ func (ac *accountCache) reload() {
|
|||
case ac.notify <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
glog.V(logger.Debug).Infof("reloaded keys, cache has %d accounts", len(ac.all))
|
||||
log.Debug("Reloaded keystore contents", "accounts", len(ac.all))
|
||||
}
|
||||
|
||||
func (ac *accountCache) scan() ([]accounts.Account, error) {
|
||||
|
@ -244,12 +243,14 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
|
|||
for _, fi := range files {
|
||||
path := filepath.Join(ac.keydir, fi.Name())
|
||||
if skipKeyFile(fi) {
|
||||
glog.V(logger.Detail).Infof("ignoring file %s", path)
|
||||
log.Trace("Ignoring file on account scan", "path", path)
|
||||
continue
|
||||
}
|
||||
logger := log.New("path", path)
|
||||
|
||||
fd, err := os.Open(path)
|
||||
if err != nil {
|
||||
glog.V(logger.Detail).Infoln(err)
|
||||
logger.Trace("Failed to open keystore file", "err", err)
|
||||
continue
|
||||
}
|
||||
buf.Reset(fd)
|
||||
|
@ -259,9 +260,9 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
|
|||
addr := common.HexToAddress(keyJSON.Address)
|
||||
switch {
|
||||
case err != nil:
|
||||
glog.V(logger.Debug).Infof("can't decode key %s: %v", path, err)
|
||||
logger.Debug("Failed to decode keystore key", "err", err)
|
||||
case (addr == common.Address{}):
|
||||
glog.V(logger.Debug).Infof("can't decode key %s: missing or zero address", path)
|
||||
logger.Debug("Failed to decode keystore key", "err", "missing or zero address")
|
||||
default:
|
||||
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
|
||||
}
|
||||
|
|
|
@ -32,7 +32,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/crypto/secp256k1"
|
||||
"github.com/pborman/uuid"
|
||||
"github.com/status-im/status-go/extkeys"
|
||||
)
|
||||
|
@ -202,7 +201,7 @@ func NewKeyForDirectICAP(rand io.Reader) *Key {
|
|||
panic("key generation: could not read from random source: " + err.Error())
|
||||
}
|
||||
reader := bytes.NewReader(randBytes)
|
||||
privateKeyECDSA, err := ecdsa.GenerateKey(secp256k1.S256(), reader)
|
||||
privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), reader)
|
||||
if err != nil {
|
||||
panic("key generation: ecdsa.GenerateKey failed: " + err.Error())
|
||||
}
|
||||
|
@ -214,7 +213,7 @@ func NewKeyForDirectICAP(rand io.Reader) *Key {
|
|||
}
|
||||
|
||||
func newKey(rand io.Reader) (*Key, error) {
|
||||
privateKeyECDSA, err := ecdsa.GenerateKey(secp256k1.S256(), rand)
|
||||
privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2015 The go-ethereum Authors
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
|
|
4
vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go
generated
vendored
4
vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go
generated
vendored
|
@ -36,6 +36,7 @@ import (
|
|||
"path/filepath"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/crypto/randentropy"
|
||||
"github.com/pborman/uuid"
|
||||
|
@ -116,8 +117,7 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
|||
return nil, err
|
||||
}
|
||||
encryptKey := derivedKey[:16]
|
||||
keyBytes0 := crypto.FromECDSA(key.PrivateKey)
|
||||
keyBytes := common.LeftPadBytes(keyBytes0, 32)
|
||||
keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
|
||||
|
||||
iv := randentropy.GetEntropyCSPRNG(aes.BlockSize) // 16
|
||||
cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
|
||||
|
|
|
@ -21,8 +21,7 @@ package keystore
|
|||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/rjeczalik/notify"
|
||||
)
|
||||
|
||||
|
@ -64,15 +63,16 @@ func (w *watcher) loop() {
|
|||
w.starting = false
|
||||
w.ac.mu.Unlock()
|
||||
}()
|
||||
logger := log.New("path", w.ac.keydir)
|
||||
|
||||
err := notify.Watch(w.ac.keydir, w.ev, notify.All)
|
||||
if err != nil {
|
||||
glog.V(logger.Detail).Infof("can't watch %s: %v", w.ac.keydir, err)
|
||||
if err := notify.Watch(w.ac.keydir, w.ev, notify.All); err != nil {
|
||||
logger.Trace("Failed to watch keystore folder", "err", err)
|
||||
return
|
||||
}
|
||||
defer notify.Stop(w.ev)
|
||||
glog.V(logger.Detail).Infof("now watching %s", w.ac.keydir)
|
||||
defer glog.V(logger.Detail).Infof("no longer watching %s", w.ac.keydir)
|
||||
|
||||
logger.Trace("Started watching keystore folder")
|
||||
defer logger.Trace("Stopped watching keystore folder")
|
||||
|
||||
w.ac.mu.Lock()
|
||||
w.running = true
|
||||
|
|
|
@ -60,6 +60,15 @@ func (u URL) String() string {
|
|||
return u.Path
|
||||
}
|
||||
|
||||
// TerminalString implements the log.TerminalStringer interface.
|
||||
func (u URL) TerminalString() string {
|
||||
url := u.String()
|
||||
if len(url) > 32 {
|
||||
return url[:31] + "…"
|
||||
}
|
||||
return url
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaller interface.
|
||||
func (u URL) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(u.String())
|
||||
|
|
|
@ -18,18 +18,18 @@
|
|||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
// +build !ios
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"errors"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/karalabe/gousb/usb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/karalabe/hid"
|
||||
)
|
||||
|
||||
// LedgerScheme is the protocol scheme prefixing account and wallet URLs.
|
||||
|
@ -49,8 +49,6 @@ const ledgerRefreshThrottling = 500 * time.Millisecond
|
|||
|
||||
// LedgerHub is a accounts.Backend that can find and handle Ledger hardware wallets.
|
||||
type LedgerHub struct {
|
||||
ctx *usb.Context // Context interfacing with a libusb instance
|
||||
|
||||
refreshed time.Time // Time instance when the list of wallets was last refreshed
|
||||
wallets []accounts.Wallet // List of Ledger devices currently tracking
|
||||
updateFeed event.Feed // Event feed to notify wallet additions/removals
|
||||
|
@ -58,23 +56,23 @@ type LedgerHub struct {
|
|||
updating bool // Whether the event notification loop is running
|
||||
|
||||
quit chan chan error
|
||||
lock sync.RWMutex
|
||||
|
||||
stateLock sync.RWMutex // Protects the internals of the hub from racey access
|
||||
|
||||
// TODO(karalabe): remove if hotplug lands on Windows
|
||||
commsPend int // Number of operations blocking enumeration
|
||||
commsLock sync.Mutex // Lock protecting the pending counter and enumeration
|
||||
}
|
||||
|
||||
// NewLedgerHub creates a new hardware wallet manager for Ledger devices.
|
||||
func NewLedgerHub() (*LedgerHub, error) {
|
||||
// Initialize the USB library to access Ledgers through
|
||||
ctx, err := usb.NewContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if !hid.Supported() {
|
||||
return nil, errors.New("unsupported platform")
|
||||
}
|
||||
// Create the USB hub, start and return it
|
||||
hub := &LedgerHub{
|
||||
ctx: ctx,
|
||||
quit: make(chan chan error),
|
||||
}
|
||||
hub.refreshWallets()
|
||||
|
||||
return hub, nil
|
||||
}
|
||||
|
||||
|
@ -84,8 +82,8 @@ func (hub *LedgerHub) Wallets() []accounts.Wallet {
|
|||
// Make sure the list of wallets is up to date
|
||||
hub.refreshWallets()
|
||||
|
||||
hub.lock.RLock()
|
||||
defer hub.lock.RUnlock()
|
||||
hub.stateLock.RLock()
|
||||
defer hub.stateLock.RUnlock()
|
||||
|
||||
cpy := make([]accounts.Wallet, len(hub.wallets))
|
||||
copy(cpy, hub.wallets)
|
||||
|
@ -96,39 +94,49 @@ func (hub *LedgerHub) Wallets() []accounts.Wallet {
|
|||
// list of wallets based on the found devices.
|
||||
func (hub *LedgerHub) refreshWallets() {
|
||||
// Don't scan the USB like crazy it the user fetches wallets in a loop
|
||||
hub.lock.RLock()
|
||||
hub.stateLock.RLock()
|
||||
elapsed := time.Since(hub.refreshed)
|
||||
hub.lock.RUnlock()
|
||||
hub.stateLock.RUnlock()
|
||||
|
||||
if elapsed < ledgerRefreshThrottling {
|
||||
return
|
||||
}
|
||||
// Retrieve the current list of Ledger devices
|
||||
var devIDs []deviceID
|
||||
var busIDs []uint16
|
||||
var ledgers []hid.DeviceInfo
|
||||
|
||||
hub.ctx.ListDevices(func(desc *usb.Descriptor) bool {
|
||||
// Gather Ledger devices, don't connect any just yet
|
||||
if runtime.GOOS == "linux" {
|
||||
// hidapi on Linux opens the device during enumeration to retrieve some infos,
|
||||
// breaking the Ledger protocol if that is waiting for user confirmation. This
|
||||
// is a bug acknowledged at Ledger, but it won't be fixed on old devices so we
|
||||
// need to prevent concurrent comms ourselves. The more elegant solution would
|
||||
// be to ditch enumeration in favor of hutplug events, but that don't work yet
|
||||
// on Windows so if we need to hack it anyway, this is more elegant for now.
|
||||
hub.commsLock.Lock()
|
||||
if hub.commsPend > 0 { // A confirmation is pending, don't refresh
|
||||
hub.commsLock.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, info := range hid.Enumerate(0, 0) { // Can't enumerate directly, one valid ID is the 0 wildcard
|
||||
for _, id := range ledgerDeviceIDs {
|
||||
if desc.Vendor == id.Vendor && desc.Product == id.Product {
|
||||
devIDs = append(devIDs, deviceID{Vendor: desc.Vendor, Product: desc.Product})
|
||||
busIDs = append(busIDs, uint16(desc.Bus)<<8+uint16(desc.Address))
|
||||
return false
|
||||
if info.VendorID == id.Vendor && info.ProductID == id.Product {
|
||||
ledgers = append(ledgers, info)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Not ledger, ignore and don't connect either
|
||||
return false
|
||||
})
|
||||
}
|
||||
if runtime.GOOS == "linux" {
|
||||
// See rationale before the enumeration why this is needed and only on Linux.
|
||||
hub.commsLock.Unlock()
|
||||
}
|
||||
// Transform the current list of wallets into the new one
|
||||
hub.lock.Lock()
|
||||
hub.stateLock.Lock()
|
||||
|
||||
wallets := make([]accounts.Wallet, 0, len(devIDs))
|
||||
wallets := make([]accounts.Wallet, 0, len(ledgers))
|
||||
events := []accounts.WalletEvent{}
|
||||
|
||||
for i := 0; i < len(devIDs); i++ {
|
||||
devID, busID := devIDs[i], busIDs[i]
|
||||
|
||||
url := accounts.URL{Scheme: LedgerScheme, Path: fmt.Sprintf("%03d:%03d", busID>>8, busID&0xff)}
|
||||
for _, ledger := range ledgers {
|
||||
url := accounts.URL{Scheme: LedgerScheme, Path: ledger.Path}
|
||||
|
||||
// Drop wallets in front of the next device or those that failed for some reason
|
||||
for len(hub.wallets) > 0 && (hub.wallets[0].URL().Cmp(url) < 0 || hub.wallets[0].(*ledgerWallet).failed()) {
|
||||
|
@ -137,7 +145,7 @@ func (hub *LedgerHub) refreshWallets() {
|
|||
}
|
||||
// If there are no more wallets or the device is before the next, wrap new wallet
|
||||
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
|
||||
wallet := &ledgerWallet{context: hub.ctx, hardwareID: devID, locationID: busID, url: &url}
|
||||
wallet := &ledgerWallet{hub: hub, url: &url, info: ledger, log: log.New("url", url)}
|
||||
|
||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
||||
wallets = append(wallets, wallet)
|
||||
|
@ -156,7 +164,7 @@ func (hub *LedgerHub) refreshWallets() {
|
|||
}
|
||||
hub.refreshed = time.Now()
|
||||
hub.wallets = wallets
|
||||
hub.lock.Unlock()
|
||||
hub.stateLock.Unlock()
|
||||
|
||||
// Fire all wallet events and return
|
||||
for _, event := range events {
|
||||
|
@ -168,8 +176,8 @@ func (hub *LedgerHub) refreshWallets() {
|
|||
// receive notifications on the addition or removal of Ledger wallets.
|
||||
func (hub *LedgerHub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
|
||||
// We need the mutex to reliably start/stop the update loop
|
||||
hub.lock.Lock()
|
||||
defer hub.lock.Unlock()
|
||||
hub.stateLock.Lock()
|
||||
defer hub.stateLock.Unlock()
|
||||
|
||||
// Subscribe the caller and track the subscriber count
|
||||
sub := hub.updateScope.Track(hub.updateFeed.Subscribe(sink))
|
||||
|
@ -198,12 +206,12 @@ func (hub *LedgerHub) updater() {
|
|||
hub.refreshWallets()
|
||||
|
||||
// If all our subscribers left, stop the updater
|
||||
hub.lock.Lock()
|
||||
hub.stateLock.Lock()
|
||||
if hub.updateScope.Count() == 0 {
|
||||
hub.updating = false
|
||||
hub.lock.Unlock()
|
||||
hub.stateLock.Unlock()
|
||||
return
|
||||
}
|
||||
hub.lock.Unlock()
|
||||
hub.stateLock.Unlock()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,11 +18,10 @@
|
|||
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
|
||||
// https://raw.githubusercontent.com/LedgerHQ/blue-app-eth/master/doc/ethapp.asc
|
||||
|
||||
// +build !ios
|
||||
|
||||
package usbwallet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
|
@ -35,12 +34,11 @@ import (
|
|||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/karalabe/gousb/usb"
|
||||
"golang.org/x/net/context"
|
||||
"github.com/karalabe/hid"
|
||||
)
|
||||
|
||||
// Maximum time between wallet health checks to detect USB unplugs.
|
||||
|
@ -74,22 +72,23 @@ const (
|
|||
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Require a user confirmation before returning the address
|
||||
)
|
||||
|
||||
// errReplyInvalidHeader is the error message returned by a Ledfer data exchange
|
||||
// errReplyInvalidHeader is the error message returned by a Ledger data exchange
|
||||
// if the device replies with a mismatching header. This usually means the device
|
||||
// is in browser mode.
|
||||
var errReplyInvalidHeader = errors.New("invalid reply header")
|
||||
|
||||
// errInvalidVersionReply is the error message returned by a Ledger version retrieval
|
||||
// when a response does arrive, but it does not contain the expected data.
|
||||
var errInvalidVersionReply = errors.New("invalid version reply")
|
||||
|
||||
// ledgerWallet represents a live USB Ledger hardware wallet.
|
||||
type ledgerWallet struct {
|
||||
context *usb.Context // USB context to interface libusb through
|
||||
hardwareID deviceID // USB identifiers to identify this device type
|
||||
locationID uint16 // USB bus and address to identify this device instance
|
||||
url *accounts.URL // Textual URL uniquely identifying this wallet
|
||||
hub *LedgerHub // USB hub the device originates from (TODO(karalabe): remove if hotplug lands on Windows)
|
||||
url *accounts.URL // Textual URL uniquely identifying this wallet
|
||||
|
||||
device *usb.Device // USB device advertising itself as a Ledger wallet
|
||||
input usb.Endpoint // Input endpoint to send data to this device
|
||||
output usb.Endpoint // Output endpoint to receive data from this device
|
||||
failure error // Any failure that would make the device unusable
|
||||
info hid.DeviceInfo // Known USB device infos about the wallet
|
||||
device *hid.Device // USB device advertising itself as a Ledger wallet
|
||||
failure error // Any failure that would make the device unusable
|
||||
|
||||
version [3]byte // Current version of the Ledger Ethereum app (zero if app is offline)
|
||||
browser bool // Flag whether the Ledger is in browser mode (reply channel mismatch)
|
||||
|
@ -125,6 +124,8 @@ type ledgerWallet struct {
|
|||
// must only ever hold a *read* lock to stateLock.
|
||||
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
||||
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
||||
|
||||
log log.Logger // Contextual logger to tag the ledger with its id
|
||||
}
|
||||
|
||||
// URL implements accounts.Wallet, returning the URL of the Ledger device.
|
||||
|
@ -183,59 +184,12 @@ func (w *ledgerWallet) Open(passphrase string) error {
|
|||
return accounts.ErrWalletAlreadyOpen
|
||||
}
|
||||
// Otherwise iterate over all USB devices and find this again (no way to directly do this)
|
||||
// Iterate over all attached devices and fetch those seemingly Ledger
|
||||
devices, err := w.context.ListDevices(func(desc *usb.Descriptor) bool {
|
||||
// Only open this single specific device
|
||||
return desc.Vendor == w.hardwareID.Vendor && desc.Product == w.hardwareID.Product &&
|
||||
uint16(desc.Bus)<<8+uint16(desc.Address) == w.locationID
|
||||
})
|
||||
device, err := w.info.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(devices) == 0 {
|
||||
return accounts.ErrUnknownWallet
|
||||
}
|
||||
// Device opened, attach to the input and output endpoints
|
||||
device := devices[0]
|
||||
|
||||
var invalid string
|
||||
switch {
|
||||
case len(device.Descriptor.Configs) == 0:
|
||||
invalid = "no endpoint config available"
|
||||
case len(device.Descriptor.Configs[0].Interfaces) == 0:
|
||||
invalid = "no endpoint interface available"
|
||||
case len(device.Descriptor.Configs[0].Interfaces[0].Setups) == 0:
|
||||
invalid = "no endpoint setup available"
|
||||
case len(device.Descriptor.Configs[0].Interfaces[0].Setups[0].Endpoints) < 2:
|
||||
invalid = "not enough IO endpoints available"
|
||||
}
|
||||
if invalid != "" {
|
||||
device.Close()
|
||||
return fmt.Errorf("ledger wallet [%s] invalid: %s", w.url, invalid)
|
||||
}
|
||||
// Open the input and output endpoints to the device
|
||||
input, err := device.OpenEndpoint(
|
||||
device.Descriptor.Configs[0].Config,
|
||||
device.Descriptor.Configs[0].Interfaces[0].Number,
|
||||
device.Descriptor.Configs[0].Interfaces[0].Setups[0].Number,
|
||||
device.Descriptor.Configs[0].Interfaces[0].Setups[0].Endpoints[1].Address,
|
||||
)
|
||||
if err != nil {
|
||||
device.Close()
|
||||
return fmt.Errorf("ledger wallet [%s] input open failed: %v", w.url, err)
|
||||
}
|
||||
output, err := device.OpenEndpoint(
|
||||
device.Descriptor.Configs[0].Config,
|
||||
device.Descriptor.Configs[0].Interfaces[0].Number,
|
||||
device.Descriptor.Configs[0].Interfaces[0].Setups[0].Number,
|
||||
device.Descriptor.Configs[0].Interfaces[0].Setups[0].Endpoints[0].Address,
|
||||
)
|
||||
if err != nil {
|
||||
device.Close()
|
||||
return fmt.Errorf("ledger wallet [%s] output open failed: %v", w.url, err)
|
||||
}
|
||||
// Wallet seems to be successfully opened, guess if the Ethereum app is running
|
||||
w.device, w.input, w.output = device, input, output
|
||||
w.device = device
|
||||
w.commsLock = make(chan struct{}, 1)
|
||||
w.commsLock <- struct{}{} // Enable lock
|
||||
|
||||
|
@ -269,8 +223,8 @@ func (w *ledgerWallet) Open(passphrase string) error {
|
|||
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
|
||||
// - communication timeout on the Ledger requires a device power cycle to fix
|
||||
func (w *ledgerWallet) heartbeat() {
|
||||
glog.V(logger.Debug).Infof("%s health-check started", w.url.String())
|
||||
defer glog.V(logger.Debug).Infof("%s health-check stopped", w.url.String())
|
||||
w.log.Debug("Ledger health-check started")
|
||||
defer w.log.Debug("Ledger health-check stopped")
|
||||
|
||||
// Execute heartbeat checks until termination or error
|
||||
var (
|
||||
|
@ -298,18 +252,18 @@ func (w *ledgerWallet) heartbeat() {
|
|||
w.commsLock <- struct{}{}
|
||||
w.stateLock.RUnlock()
|
||||
|
||||
if err == usb.ERROR_IO || err == usb.ERROR_NO_DEVICE {
|
||||
if err != nil && err != errInvalidVersionReply {
|
||||
w.stateLock.Lock() // Lock state to tear the wallet down
|
||||
w.failure = err
|
||||
w.close()
|
||||
w.stateLock.Unlock()
|
||||
}
|
||||
// Ignore uninteresting errors
|
||||
// Ignore non hardware related errors
|
||||
err = nil
|
||||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
glog.V(logger.Debug).Infof("%s health-check failed: %v", w.url.String(), err)
|
||||
w.log.Debug("Ledger health-check failed", "err", err)
|
||||
errc = <-w.healthQuit
|
||||
}
|
||||
errc <- err
|
||||
|
@ -363,13 +317,13 @@ func (w *ledgerWallet) close() error {
|
|||
return nil
|
||||
}
|
||||
// Close the device, clear everything, then return
|
||||
err := w.device.Close()
|
||||
w.device.Close()
|
||||
w.device = nil
|
||||
|
||||
w.device, w.input, w.output = nil, nil, nil
|
||||
w.browser, w.version = false, [3]byte{}
|
||||
w.accounts, w.paths = nil, nil
|
||||
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
// Accounts implements accounts.Wallet, returning the list of accounts pinned to
|
||||
|
@ -397,8 +351,8 @@ func (w *ledgerWallet) Accounts() []accounts.Account {
|
|||
// selfDerive is an account derivation loop that upon request attempts to find
|
||||
// new non-zero accounts.
|
||||
func (w *ledgerWallet) selfDerive() {
|
||||
glog.V(logger.Debug).Infof("%s self-derivation started", w.url.String())
|
||||
defer glog.V(logger.Debug).Infof("%s self-derivation stopped", w.url.String())
|
||||
w.log.Debug("Ledger self-derivation started")
|
||||
defer w.log.Debug("Ledger self-derivation stopped")
|
||||
|
||||
// Execute self-derivations until termination or error
|
||||
var (
|
||||
|
@ -443,7 +397,7 @@ func (w *ledgerWallet) selfDerive() {
|
|||
// Retrieve the next derived Ethereum account
|
||||
if nextAddr == (common.Address{}) {
|
||||
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
|
||||
glog.V(logger.Warn).Infof("%s self-derivation failed: %v", w.url.String(), err)
|
||||
w.log.Warn("Ledger account derivation failed", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -454,16 +408,16 @@ func (w *ledgerWallet) selfDerive() {
|
|||
)
|
||||
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
glog.V(logger.Warn).Infof("%s self-derivation balance retrieval failed: %v", w.url.String(), err)
|
||||
w.log.Warn("Ledger balance retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
||||
if err != nil {
|
||||
glog.V(logger.Warn).Infof("%s self-derivation nonce retrieval failed: %v", w.url.String(), err)
|
||||
w.log.Warn("Ledger nonce retrieval failed", "err", err)
|
||||
break
|
||||
}
|
||||
// If the next account is empty, stop self-derivation, but add it nonetheless
|
||||
if balance.BitLen() == 0 && nonce == 0 {
|
||||
if balance.Sign() == 0 && nonce == 0 {
|
||||
empty = true
|
||||
}
|
||||
// We've just self-derived a new account, start tracking it locally
|
||||
|
@ -479,7 +433,7 @@ func (w *ledgerWallet) selfDerive() {
|
|||
|
||||
// Display a log message to the user for new (or previously empty accounts)
|
||||
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
||||
glog.V(logger.Info).Infof("%s discovered %s (balance %22v, nonce %4d) at %s", w.url.String(), nextAddr.Hex(), balance, nonce, path)
|
||||
w.log.Info("Ledger discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
|
||||
}
|
||||
// Fetch the next potential account
|
||||
if !empty {
|
||||
|
@ -518,7 +472,7 @@ func (w *ledgerWallet) selfDerive() {
|
|||
}
|
||||
// In case of error, wait for termination
|
||||
if err != nil {
|
||||
glog.V(logger.Debug).Infof("%s self-derivation failed: %s", w.url.String(), err)
|
||||
w.log.Debug("Ledger self-derivation failed", "err", err)
|
||||
errc = <-w.deriveQuit
|
||||
}
|
||||
errc <- err
|
||||
|
@ -623,6 +577,17 @@ func (w *ledgerWallet) SignTx(account accounts.Account, tx *types.Transaction, c
|
|||
<-w.commsLock
|
||||
defer func() { w.commsLock <- struct{}{} }()
|
||||
|
||||
// Ensure the device isn't screwed with while user confirmation is pending
|
||||
// TODO(karalabe): remove if hotplug lands on Windows
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend++
|
||||
w.hub.commsLock.Unlock()
|
||||
|
||||
defer func() {
|
||||
w.hub.commsLock.Lock()
|
||||
w.hub.commsPend--
|
||||
w.hub.commsLock.Unlock()
|
||||
}()
|
||||
return w.ledgerSign(path, account.Address, tx, chainID)
|
||||
}
|
||||
|
||||
|
@ -664,7 +629,7 @@ func (w *ledgerWallet) ledgerVersion() ([3]byte, error) {
|
|||
return [3]byte{}, err
|
||||
}
|
||||
if len(reply) != 4 {
|
||||
return [3]byte{}, errors.New("reply not of correct size")
|
||||
return [3]byte{}, errInvalidVersionReply
|
||||
}
|
||||
// Cache the version for future reference
|
||||
var version [3]byte
|
||||
|
@ -768,10 +733,6 @@ func (w *ledgerWallet) ledgerDerive(derivationPath []uint32) (common.Address, er
|
|||
// signature R | 32 bytes
|
||||
// signature S | 32 bytes
|
||||
func (w *ledgerWallet) ledgerSign(derivationPath []uint32, address common.Address, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
|
||||
// We need to modify the timeouts to account for user feedback
|
||||
defer func(old time.Duration) { w.device.ReadTimeout = old }(w.device.ReadTimeout)
|
||||
w.device.ReadTimeout = time.Hour * 24 * 30 // Timeout requires a Ledger power cycle, only if you must
|
||||
|
||||
// Flatten the derivation path into the Ledger request
|
||||
path := make([]byte, 1+4*len(derivationPath))
|
||||
path[0] = byte(len(derivationPath))
|
||||
|
@ -902,10 +863,8 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
|
|||
apdu = nil
|
||||
}
|
||||
// Send over to the device
|
||||
if glog.V(logger.Detail) {
|
||||
glog.Infof("-> %03d.%03d: %x", w.device.Bus, w.device.Address, chunk)
|
||||
}
|
||||
if _, err := w.input.Write(chunk); err != nil {
|
||||
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
if _, err := w.device.Write(chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
@ -914,12 +873,11 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
|
|||
chunk = chunk[:64] // Yeah, we surely have enough space
|
||||
for {
|
||||
// Read the next chunk from the Ledger wallet
|
||||
if _, err := io.ReadFull(w.output, chunk); err != nil {
|
||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if glog.V(logger.Detail) {
|
||||
glog.Infof("<- %03d.%03d: %x", w.device.Bus, w.device.Address, chunk)
|
||||
}
|
||||
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||
|
||||
// Make sure the transport header matches
|
||||
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
||||
return nil, errReplyInvalidHeader
|
||||
|
|
|
@ -14,16 +14,12 @@
|
|||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build !ios
|
||||
|
||||
// Package usbwallet implements support for USB hardware wallets.
|
||||
package usbwallet
|
||||
|
||||
import "github.com/karalabe/gousb/usb"
|
||||
|
||||
// deviceID is a combined vendor/product identifier to uniquely identify a USB
|
||||
// hardware device.
|
||||
type deviceID struct {
|
||||
Vendor usb.ID // The Vendor identifer
|
||||
Product usb.ID // The Product identifier
|
||||
Vendor uint16 // The Vendor identifer
|
||||
Product uint16 // The Product identifier
|
||||
}
|
||||
|
|
|
@ -25,7 +25,7 @@ import (
|
|||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
|
@ -42,15 +42,19 @@ func main() {
|
|||
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
|
||||
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
||||
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
||||
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-9)")
|
||||
vmodule = flag.String("vmodule", "", "log verbosity pattern")
|
||||
|
||||
nodeKey *ecdsa.PrivateKey
|
||||
err error
|
||||
)
|
||||
flag.Var(glog.GetVerbosity(), "verbosity", "log verbosity (0-9)")
|
||||
flag.Var(glog.GetVModule(), "vmodule", "log verbosity pattern")
|
||||
glog.SetToStderr(true)
|
||||
flag.Parse()
|
||||
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
||||
glogger.Verbosity(log.Lvl(*verbosity))
|
||||
glogger.Vmodule(*vmodule)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
natm, err := nat.Parse(*natdesc)
|
||||
if err != nil {
|
||||
utils.Fatalf("-nat: %v", err)
|
||||
|
@ -64,6 +68,7 @@ func main() {
|
|||
if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
return
|
||||
case *nodeKeyFile == "" && *nodeKeyHex == "":
|
||||
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
|
||||
case *nodeKeyFile != "" && *nodeKeyHex != "":
|
||||
|
|
|
@ -1,60 +0,0 @@
|
|||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// disasm is a pretty-printer for EVM bytecode.
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
)
|
||||
|
||||
func main() {
|
||||
code, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
code, err = hex.DecodeString(strings.TrimSpace(string(code[:])))
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Printf("%x\n", code)
|
||||
|
||||
for pc := uint64(0); pc < uint64(len(code)); pc++ {
|
||||
op := vm.OpCode(code[pc])
|
||||
|
||||
switch op {
|
||||
case vm.PUSH1, vm.PUSH2, vm.PUSH3, vm.PUSH4, vm.PUSH5, vm.PUSH6, vm.PUSH7, vm.PUSH8, vm.PUSH9, vm.PUSH10, vm.PUSH11, vm.PUSH12, vm.PUSH13, vm.PUSH14, vm.PUSH15, vm.PUSH16, vm.PUSH17, vm.PUSH18, vm.PUSH19, vm.PUSH20, vm.PUSH21, vm.PUSH22, vm.PUSH23, vm.PUSH24, vm.PUSH25, vm.PUSH26, vm.PUSH27, vm.PUSH28, vm.PUSH29, vm.PUSH30, vm.PUSH31, vm.PUSH32:
|
||||
a := uint64(op) - uint64(vm.PUSH1) + 1
|
||||
u := pc + 1 + a
|
||||
if uint64(len(code)) <= pc || uint64(len(code)) < u {
|
||||
fmt.Printf("Error: incomplete push instruction at %v\n", pc)
|
||||
return
|
||||
}
|
||||
fmt.Printf("%-5d %v => %x\n", pc, op, code[pc+1:u])
|
||||
pc += a
|
||||
default:
|
||||
fmt.Printf("%-5d %v\n", pc, op)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
|
||||
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var compileCommand = cli.Command{
|
||||
Action: compileCmd,
|
||||
Name: "compile",
|
||||
Usage: "compiles easm source to evm binary",
|
||||
ArgsUsage: "<file>",
|
||||
}
|
||||
|
||||
func compileCmd(ctx *cli.Context) error {
|
||||
debug := ctx.GlobalBool(DebugFlag.Name)
|
||||
|
||||
if len(ctx.Args().First()) == 0 {
|
||||
return errors.New("filename required")
|
||||
}
|
||||
|
||||
fn := ctx.Args().First()
|
||||
src, err := ioutil.ReadFile(fn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bin, err := compiler.Compile(fn, src, debug)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(bin)
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,53 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/asm"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var disasmCommand = cli.Command{
|
||||
Action: disasmCmd,
|
||||
Name: "disasm",
|
||||
Usage: "disassembles evm binary",
|
||||
ArgsUsage: "<file>",
|
||||
}
|
||||
|
||||
func disasmCmd(ctx *cli.Context) error {
|
||||
if len(ctx.Args().First()) == 0 {
|
||||
return errors.New("filename required")
|
||||
}
|
||||
|
||||
fn := ctx.Args().First()
|
||||
in, err := ioutil.ReadFile(fn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
code := strings.TrimSpace(string(in[:]))
|
||||
fmt.Printf("%v\n", code)
|
||||
if err = asm.PrintDisassembled(code); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
39
vendor/github.com/ethereum/go-ethereum/cmd/evm/internal/compiler/compiler.go
generated
vendored
Normal file
39
vendor/github.com/ethereum/go-ethereum/cmd/evm/internal/compiler/compiler.go
generated
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/asm"
|
||||
)
|
||||
|
||||
func Compile(fn string, src []byte, debug bool) (string, error) {
|
||||
compiler := asm.NewCompiler(debug)
|
||||
compiler.Feed(asm.Lex(fn, src, debug))
|
||||
|
||||
bin, compileErrors := compiler.Compile()
|
||||
if len(compileErrors) > 0 {
|
||||
// report errors
|
||||
for _, err := range compileErrors {
|
||||
fmt.Printf("%s:%v\n", fn, err)
|
||||
}
|
||||
return "", errors.New("compiling failed")
|
||||
}
|
||||
return bin, nil
|
||||
}
|
|
@ -19,19 +19,10 @@ package main

import (
    "fmt"
    "io/ioutil"
    "math/big"
    "os"
    goruntime "runtime"
    "time"

    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/core/vm/runtime"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/logger/glog"
    "gopkg.in/urfave/cli.v1"
)

@ -52,20 +43,20 @@ var (
        Name:  "codefile",
        Usage: "file containing EVM code",
    }
    GasFlag = cli.StringFlag{
    GasFlag = cli.Uint64Flag{
        Name:  "gas",
        Usage: "gas limit for the evm",
        Value: "10000000000",
        Value: 10000000000,
    }
    PriceFlag = cli.StringFlag{
    PriceFlag = utils.BigFlag{
        Name:  "price",
        Usage: "price set for the evm",
        Value: "0",
        Value: new(big.Int),
    }
    ValueFlag = cli.StringFlag{
    ValueFlag = utils.BigFlag{
        Name:  "value",
        Usage: "value set for the evm",
        Value: "0",
        Value: new(big.Int),
    }
    DumpFlag = cli.BoolFlag{
        Name: "dump",

@ -75,10 +66,6 @@ var (
        Name:  "input",
        Usage: "input for the EVM",
    }
    SysStatFlag = cli.BoolFlag{
        Name:  "sysstat",
        Usage: "display system stats",
    }
    VerbosityFlag = cli.IntFlag{
        Name:  "verbosity",
        Usage: "sets the verbosity level",

@ -98,7 +85,6 @@ func init() {
        CreateFlag,
        DebugFlag,
        VerbosityFlag,
        SysStatFlag,
        CodeFlag,
        CodeFileFlag,
        GasFlag,

@ -108,107 +94,11 @@ func init() {
        InputFlag,
        DisableGasMeteringFlag,
    }
    app.Action = run
}

func run(ctx *cli.Context) error {
    glog.SetToStderr(true)
    glog.SetV(ctx.GlobalInt(VerbosityFlag.Name))

    db, _ := ethdb.NewMemDatabase()
    statedb, _ := state.New(common.Hash{}, db)
    sender := statedb.CreateAccount(common.StringToAddress("sender"))

    logger := vm.NewStructLogger(nil)

    tstart := time.Now()

    var (
        code []byte
        ret  []byte
        err  error
    )

    if ctx.GlobalString(CodeFlag.Name) != "" {
        code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
    } else {
        var hexcode []byte
        if ctx.GlobalString(CodeFileFlag.Name) != "" {
            var err error
            hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name))
            if err != nil {
                fmt.Printf("Could not load code from file: %v\n", err)
                os.Exit(1)
            }
        } else {
            var err error
            hexcode, err = ioutil.ReadAll(os.Stdin)
            if err != nil {
                fmt.Printf("Could not load code from stdin: %v\n", err)
                os.Exit(1)
            }
        }
        code = common.Hex2Bytes(string(hexcode[:]))
    app.Commands = []cli.Command{
        compileCommand,
        disasmCommand,
        runCommand,
    }

    if ctx.GlobalBool(CreateFlag.Name) {
        input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
        ret, _, err = runtime.Create(input, &runtime.Config{
            Origin:   sender.Address(),
            State:    statedb,
            GasLimit: common.Big(ctx.GlobalString(GasFlag.Name)),
            GasPrice: common.Big(ctx.GlobalString(PriceFlag.Name)),
            Value:    common.Big(ctx.GlobalString(ValueFlag.Name)),
            EVMConfig: vm.Config{
                Tracer:             logger,
                Debug:              ctx.GlobalBool(DebugFlag.Name),
                DisableGasMetering: ctx.GlobalBool(DisableGasMeteringFlag.Name),
            },
        })
    } else {
        receiver := statedb.CreateAccount(common.StringToAddress("receiver"))
        receiver.SetCode(crypto.Keccak256Hash(code), code)

        ret, err = runtime.Call(receiver.Address(), common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtime.Config{
            Origin:   sender.Address(),
            State:    statedb,
            GasLimit: common.Big(ctx.GlobalString(GasFlag.Name)),
            GasPrice: common.Big(ctx.GlobalString(PriceFlag.Name)),
            Value:    common.Big(ctx.GlobalString(ValueFlag.Name)),
            EVMConfig: vm.Config{
                Tracer:             logger,
                Debug:              ctx.GlobalBool(DebugFlag.Name),
                DisableGasMetering: ctx.GlobalBool(DisableGasMeteringFlag.Name),
            },
        })
    }
    vmdone := time.Since(tstart)

    if ctx.GlobalBool(DumpFlag.Name) {
        statedb.Commit(true)
        fmt.Println(string(statedb.Dump()))
    }
    vm.StdErrFormat(logger.StructLogs())

    if ctx.GlobalBool(SysStatFlag.Name) {
        var mem goruntime.MemStats
        goruntime.ReadMemStats(&mem)
        fmt.Printf("vm took %v\n", vmdone)
        fmt.Printf(`alloc: %d
tot alloc: %d
no. malloc: %d
heap alloc: %d
heap objs: %d
num gc: %d
`, mem.Alloc, mem.TotalAlloc, mem.Mallocs, mem.HeapAlloc, mem.HeapObjects, mem.NumGC)
    }

    fmt.Printf("OUT: 0x%x", ret)
    if err != nil {
        fmt.Printf(" error: %v", err)
    }
    fmt.Println()
    return nil
}

func main() {
@ -0,0 +1,151 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "os"
    "time"

    goruntime "runtime"

    "github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/core/vm/runtime"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/log"
    cli "gopkg.in/urfave/cli.v1"
)

var runCommand = cli.Command{
    Action:      runCmd,
    Name:        "run",
    Usage:       "run arbitrary evm binary",
    ArgsUsage:   "<code>",
    Description: `The run command runs arbitrary EVM code.`,
}

func runCmd(ctx *cli.Context) error {
    glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
    glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
    log.Root().SetHandler(glogger)

    var (
        db, _      = ethdb.NewMemDatabase()
        statedb, _ = state.New(common.Hash{}, db)
        sender     = common.StringToAddress("sender")
        logger     = vm.NewStructLogger(nil)
    )
    statedb.CreateAccount(sender)

    var (
        code []byte
        ret  []byte
        err  error
    )
    if fn := ctx.Args().First(); len(fn) > 0 {
        src, err := ioutil.ReadFile(fn)
        if err != nil {
            return err
        }

        bin, err := compiler.Compile(fn, src, false)
        if err != nil {
            return err
        }
        code = common.Hex2Bytes(bin)
    } else if ctx.GlobalString(CodeFlag.Name) != "" {
        code = common.Hex2Bytes(ctx.GlobalString(CodeFlag.Name))
    } else {
        var hexcode []byte
        if ctx.GlobalString(CodeFileFlag.Name) != "" {
            var err error
            hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name))
            if err != nil {
                fmt.Printf("Could not load code from file: %v\n", err)
                os.Exit(1)
            }
        } else {
            var err error
            hexcode, err = ioutil.ReadAll(os.Stdin)
            if err != nil {
                fmt.Printf("Could not load code from stdin: %v\n", err)
                os.Exit(1)
            }
        }
        code = common.Hex2Bytes(string(bytes.TrimRight(hexcode, "\n")))
    }

    runtimeConfig := runtime.Config{
        Origin:   sender,
        State:    statedb,
        GasLimit: ctx.GlobalUint64(GasFlag.Name),
        GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
        Value:    utils.GlobalBig(ctx, ValueFlag.Name),
        EVMConfig: vm.Config{
            Tracer:             logger,
            Debug:              ctx.GlobalBool(DebugFlag.Name),
            DisableGasMetering: ctx.GlobalBool(DisableGasMeteringFlag.Name),
        },
    }

    tstart := time.Now()
    if ctx.GlobalBool(CreateFlag.Name) {
        input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
        ret, _, err = runtime.Create(input, &runtimeConfig)
    } else {
        receiver := common.StringToAddress("receiver")
        statedb.SetCode(receiver, code)

        ret, err = runtime.Call(receiver, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name)), &runtimeConfig)
    }
    execTime := time.Since(tstart)

    if ctx.GlobalBool(DumpFlag.Name) {
        statedb.Commit(true)
        fmt.Println(string(statedb.Dump()))
    }

    if ctx.GlobalBool(DebugFlag.Name) {
        fmt.Fprintln(os.Stderr, "#### TRACE ####")
        vm.WriteTrace(os.Stderr, logger.StructLogs())
        fmt.Fprintln(os.Stderr, "#### LOGS ####")
        vm.WriteLogs(os.Stderr, statedb.Logs())

        var mem goruntime.MemStats
        goruntime.ReadMemStats(&mem)
        fmt.Fprintf(os.Stderr, `evm execution time: %v
heap objects: %d
allocations: %d
total allocations: %d
GC calls: %d

`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC)
    }

    fmt.Printf("0x%x", ret)
    if err != nil {
        fmt.Printf(" error: %v", err)
    }
    fmt.Println()
    return nil
}
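At its core, runCmd above just feeds raw bytecode into the core/vm/runtime helpers against a fresh in-memory state. A minimal sketch of that path, assuming the same runtime package also exposes its Execute convenience wrapper (as in upstream go-ethereum); the bytecode constant is purely illustrative:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/vm/runtime"
)

// Execute PUSH1 0x01, PUSH1 0x01, ADD against a throwaway state and print
// the return data in the same "0x%x" style as runCmd. Since the code never
// RETURNs, the printed return data is empty ("0x").
func main() {
    code := common.Hex2Bytes("6001600101")
    ret, _, err := runtime.Execute(code, nil, nil) // nil config falls back to the package defaults
    fmt.Printf("0x%x", ret)
    if err != nil {
        fmt.Printf(" error: %v", err)
    }
    fmt.Println()
}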
@ -0,0 +1,499 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// faucet is an Ether faucet backed by a light client.
package main

//go:generate go-bindata -nometadata -o website.go faucet.html

import (
    "bytes"
    "context"
    "encoding/json"
    "flag"
    "fmt"
    "html/template"
    "io/ioutil"
    "math/big"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "strings"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/accounts"
    "github.com/ethereum/go-ethereum/accounts/keystore"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/eth"
    "github.com/ethereum/go-ethereum/eth/downloader"
    "github.com/ethereum/go-ethereum/ethclient"
    "github.com/ethereum/go-ethereum/ethstats"
    "github.com/ethereum/go-ethereum/les"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/discover"
    "github.com/ethereum/go-ethereum/p2p/discv5"
    "github.com/ethereum/go-ethereum/p2p/nat"
    "github.com/ethereum/go-ethereum/params"
    "golang.org/x/net/websocket"
)

var (
    genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with")
    apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
    ethPortFlag = flag.Int("ethport", 30303, "Listener port for the devp2p connection")
    bootFlag    = flag.String("bootnodes", "", "Comma separated bootnode enode URLs to seed with")
    netFlag     = flag.Uint64("network", 0, "Network ID to use for the Ethereum protocol")
    statsFlag   = flag.String("ethstats", "", "Ethstats network monitoring auth string")

    netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
    payoutFlag  = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
    minutesFlag = flag.Int("faucet.minutes", 1440, "Number of minutes to wait between funding rounds")

    accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
    accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")

    githubUser  = flag.String("github.user", "", "GitHub user to authenticate with for Gist access")
    githubToken = flag.String("github.token", "", "GitHub personal token to access Gists with")

    captchaToken  = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
    captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")

    logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
)

var (
    ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
)

func main() {
    // Parse the flags and set up the logger to print everything requested
    flag.Parse()
    log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*logFlag), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

    // Load up and render the faucet website
    tmpl, err := Asset("faucet.html")
    if err != nil {
        log.Crit("Failed to load the faucet template", "err", err)
    }
    period := fmt.Sprintf("%d minute(s)", *minutesFlag)
    if *minutesFlag%60 == 0 {
        period = fmt.Sprintf("%d hour(s)", *minutesFlag/60)
    }
    website := new(bytes.Buffer)
    template.Must(template.New("").Parse(string(tmpl))).Execute(website, map[string]interface{}{
        "Network":   *netnameFlag,
        "Amount":    *payoutFlag,
        "Period":    period,
        "Recaptcha": *captchaToken,
    })
    // Load and parse the genesis block requested by the user
    blob, err := ioutil.ReadFile(*genesisFlag)
    if err != nil {
        log.Crit("Failed to read genesis block contents", "genesis", *genesisFlag, "err", err)
    }
    genesis := new(core.Genesis)
    if err = json.Unmarshal(blob, genesis); err != nil {
        log.Crit("Failed to parse genesis block json", "err", err)
    }
    // Convert the bootnodes to internal enode representations
    var enodes []*discv5.Node
    for _, boot := range strings.Split(*bootFlag, ",") {
        if url, err := discv5.ParseNode(boot); err == nil {
            enodes = append(enodes, url)
        } else {
            log.Error("Failed to parse bootnode URL", "url", boot, "err", err)
        }
    }
    // Load up the account key and decrypt its password
    if blob, err = ioutil.ReadFile(*accPassFlag); err != nil {
        log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err)
    }
    pass := string(blob)

    ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP)
    if blob, err = ioutil.ReadFile(*accJSONFlag); err != nil {
        log.Crit("Failed to read account key contents", "file", *accJSONFlag, "err", err)
    }
    acc, err := ks.Import(blob, pass, pass)
    if err != nil {
        log.Crit("Failed to import faucet signer account", "err", err)
    }
    ks.Unlock(acc, pass)

    // Assemble and start the faucet light service
    faucet, err := newFaucet(genesis, *ethPortFlag, enodes, *netFlag, *statsFlag, ks, website.Bytes())
    if err != nil {
        log.Crit("Failed to start faucet", "err", err)
    }
    defer faucet.close()

    if err := faucet.listenAndServe(*apiPortFlag); err != nil {
        log.Crit("Failed to launch faucet API", "err", err)
    }
}

// request represents an accepted funding request.
type request struct {
    Username string             `json:"username"` // GitHub user for displaying an avatar
    Account  common.Address     `json:"account"`  // Ethereum address being funded
    Time     time.Time          `json:"time"`     // Timestamp when the request was accepted
    Tx       *types.Transaction `json:"tx"`       // Transaction funding the account
}

// faucet represents a crypto faucet backed by an Ethereum light client.
type faucet struct {
    config *params.ChainConfig // Chain configurations for signing
    stack  *node.Node          // Ethereum protocol stack
    client *ethclient.Client   // Client connection to the Ethereum chain
    index  []byte              // Index page to serve up on the web

    keystore *keystore.KeyStore // Keystore containing the single signer
    account  accounts.Account   // Account funding user faucet requests
    nonce    uint64             // Current pending nonce of the faucet
    price    *big.Int           // Current gas price to issue funds with

    conns   []*websocket.Conn    // Currently live websocket connections
    history map[string]time.Time // History of users and their funding requests
    reqs    []*request           // Currently pending funding requests
    update  chan struct{}        // Channel to signal request updates

    lock sync.RWMutex // Lock protecting the faucet's internals
}

func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
    // Assemble the raw devp2p protocol stack
    stack, err := node.New(&node.Config{
        Name:    "geth",
        Version: params.Version,
        DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
        P2P: p2p.Config{
            NAT:              nat.Any(),
            NoDiscovery:      true,
            DiscoveryV5:      true,
            ListenAddr:       fmt.Sprintf(":%d", port),
            DiscoveryV5Addr:  fmt.Sprintf(":%d", port+1),
            MaxPeers:         25,
            BootstrapNodesV5: enodes,
        },
    })
    if err != nil {
        return nil, err
    }
    // Assemble the Ethereum light client protocol
    if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
        cfg := eth.DefaultConfig
        cfg.SyncMode = downloader.LightSync
        cfg.NetworkId = network
        cfg.Genesis = genesis
        return les.New(ctx, &cfg)
    }); err != nil {
        return nil, err
    }
    // Assemble the ethstats monitoring and reporting service
    if stats != "" {
        if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
            var serv *les.LightEthereum
            ctx.Service(&serv)
            return ethstats.New(stats, nil, serv)
        }); err != nil {
            return nil, err
        }
    }
    // Boot up the client and ensure it connects to bootnodes
    if err := stack.Start(); err != nil {
        return nil, err
    }
    for _, boot := range enodes {
        old, _ := discover.ParseNode(boot.String())
        stack.Server().AddPeer(old)
    }
    // Attach to the client and retrieve any interesting metadata
    api, err := stack.Attach()
    if err != nil {
        stack.Stop()
        return nil, err
    }
    client := ethclient.NewClient(api)

    return &faucet{
        config:   genesis.Config,
        stack:    stack,
        client:   client,
        index:    index,
        keystore: ks,
        account:  ks.Accounts()[0],
        history:  make(map[string]time.Time),
        update:   make(chan struct{}, 1),
    }, nil
}

// close terminates the Ethereum connection and tears down the faucet.
func (f *faucet) close() error {
    return f.stack.Stop()
}

// listenAndServe registers the HTTP handlers for the faucet and boots it up
// for servicing user funding requests.
func (f *faucet) listenAndServe(port int) error {
    go f.loop()

    http.HandleFunc("/", f.webHandler)
    http.Handle("/api", websocket.Handler(f.apiHandler))

    return http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
}

// webHandler handles all non-api requests, simply flattening and returning the
// faucet website.
func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
    w.Write(f.index)
}

// apiHandler handles requests for Ether grants and transaction statuses.
func (f *faucet) apiHandler(conn *websocket.Conn) {
    // Start tracking the connection and drop at the end
    f.lock.Lock()
    f.conns = append(f.conns, conn)
    f.lock.Unlock()

    defer func() {
        f.lock.Lock()
        for i, c := range f.conns {
            if c == conn {
                f.conns = append(f.conns[:i], f.conns[i+1:]...)
                break
            }
        }
        f.lock.Unlock()
    }()
    // Send a few initial stats to the client
    balance, _ := f.client.BalanceAt(context.Background(), f.account.Address, nil)
    nonce, _ := f.client.NonceAt(context.Background(), f.account.Address, nil)

    websocket.JSON.Send(conn, map[string]interface{}{
        "funds":    balance.Div(balance, ether),
        "funded":   nonce,
        "peers":    f.stack.Server().PeerCount(),
        "requests": f.reqs,
    })
    header, _ := f.client.HeaderByNumber(context.Background(), nil)
    websocket.JSON.Send(conn, header)

    // Keep reading requests from the websocket until the connection breaks
    for {
        // Fetch the next funding request and validate against github
        var msg struct {
            URL     string `json:"url"`
            Captcha string `json:"captcha"`
        }
        if err := websocket.JSON.Receive(conn, &msg); err != nil {
            return
        }
        if !strings.HasPrefix(msg.URL, "https://gist.github.com/") {
            websocket.JSON.Send(conn, map[string]string{"error": "URL doesn't link to GitHub Gists"})
            continue
        }
        log.Info("Faucet funds requested", "gist", msg.URL)

        // If captcha verifications are enabled, make sure we're not dealing with a robot
        if *captchaToken != "" {
            form := url.Values{}
            form.Add("secret", *captchaSecret)
            form.Add("response", msg.Captcha)

            res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
            if err != nil {
                websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
                continue
            }
            var result struct {
                Success bool            `json:"success"`
                Errors  json.RawMessage `json:"error-codes"`
            }
            err = json.NewDecoder(res.Body).Decode(&result)
            res.Body.Close()
            if err != nil {
                websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
                continue
            }
            if !result.Success {
                log.Warn("Captcha verification failed", "err", string(result.Errors))
                websocket.JSON.Send(conn, map[string]string{"error": "Beep-boop, you're a robot!"})
                continue
            }
        }
        // Retrieve the gist from the GitHub Gist APIs
        parts := strings.Split(msg.URL, "/")
        req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
        if *githubUser != "" {
            req.SetBasicAuth(*githubUser, *githubToken)
        }
        res, err := http.DefaultClient.Do(req)
        if err != nil {
            websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
            continue
        }
        var gist struct {
            Owner struct {
                Login string `json:"login"`
            } `json:"owner"`
            Files map[string]struct {
                Content string `json:"content"`
            } `json:"files"`
        }
        err = json.NewDecoder(res.Body).Decode(&gist)
        res.Body.Close()
        if err != nil {
            websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
            continue
        }
        if gist.Owner.Login == "" {
            websocket.JSON.Send(conn, map[string]string{"error": "Anonymous Gists not allowed"})
            continue
        }
        // Iterate over all the files and look for Ethereum addresses
        var address common.Address
        for _, file := range gist.Files {
            if len(file.Content) == 2+common.AddressLength*2 {
                address = common.HexToAddress(file.Content)
            }
        }
        if address == (common.Address{}) {
            websocket.JSON.Send(conn, map[string]string{"error": "No Ethereum address found to fund"})
            continue
        }
        // Validate the user's existence since the API is unhelpful here
        if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
            websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
            continue
        }
        res.Body.Close()

        if res.StatusCode != 200 {
            websocket.JSON.Send(conn, map[string]string{"error": "Invalid user... boom!"})
            continue
        }
        // Ensure the user didn't request funds too recently
        f.lock.Lock()
        var (
            fund    bool
            elapsed time.Duration
        )
        if elapsed = time.Since(f.history[gist.Owner.Login]); elapsed > time.Duration(*minutesFlag)*time.Minute {
            // User wasn't funded recently, create the funding transaction
            tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether), big.NewInt(21000), f.price, nil)
            signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
            if err != nil {
                websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
                f.lock.Unlock()
                continue
            }
            // Submit the transaction and mark as funded if successful
            if err := f.client.SendTransaction(context.Background(), signed); err != nil {
                websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
                f.lock.Unlock()
                continue
            }
            f.reqs = append(f.reqs, &request{
                Username: gist.Owner.Login,
                Account:  address,
                Time:     time.Now(),
                Tx:       signed,
            })
            f.history[gist.Owner.Login] = time.Now()
            fund = true
        }
        f.lock.Unlock()

        // Send an error if too frequent funding, otherwise a success
        if !fund {
            websocket.JSON.Send(conn, map[string]string{"error": fmt.Sprintf("User already funded %s ago", common.PrettyDuration(elapsed))})
            continue
        }
        websocket.JSON.Send(conn, map[string]string{"success": fmt.Sprintf("Funding request accepted for %s into %s", gist.Owner.Login, address.Hex())})
        select {
        case f.update <- struct{}{}:
        default:
        }
    }
}

// loop keeps waiting for interesting events and pushes them out to connected
// websockets.
func (f *faucet) loop() {
    // Wait for chain events and push them to clients
    heads := make(chan *types.Header, 16)
    sub, err := f.client.SubscribeNewHead(context.Background(), heads)
    if err != nil {
        log.Crit("Failed to subscribe to head events", "err", err)
    }
    defer sub.Unsubscribe()

    for {
        select {
        case head := <-heads:
            // New chain head arrived, query the current stats and stream to clients
            balance, _ := f.client.BalanceAt(context.Background(), f.account.Address, nil)
            balance = new(big.Int).Div(balance, ether)

            price, _ := f.client.SuggestGasPrice(context.Background())
            nonce, _ := f.client.NonceAt(context.Background(), f.account.Address, nil)

            f.lock.Lock()
            f.price, f.nonce = price, nonce
            for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
                f.reqs = f.reqs[1:]
            }
            f.lock.Unlock()

            f.lock.RLock()
            for _, conn := range f.conns {
                if err := websocket.JSON.Send(conn, map[string]interface{}{
                    "funds":    balance,
                    "funded":   f.nonce,
                    "peers":    f.stack.Server().PeerCount(),
                    "requests": f.reqs,
                }); err != nil {
                    log.Warn("Failed to send stats to client", "err", err)
                    conn.Close()
                    continue
                }
                if err := websocket.JSON.Send(conn, head); err != nil {
                    log.Warn("Failed to send header to client", "err", err)
                    conn.Close()
                }
            }
            f.lock.RUnlock()

        case <-f.update:
            // Pending requests updated, stream to clients
            f.lock.RLock()
            for _, conn := range f.conns {
                if err := websocket.JSON.Send(conn, map[string]interface{}{"requests": f.reqs}); err != nil {
                    log.Warn("Failed to send requests to client", "err", err)
                    conn.Close()
                }
            }
            f.lock.RUnlock()
        }
    }
}
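The apiHandler/loop pair above defines a small JSON-over-websocket protocol: the client sends {"url": <gist URL>, "captcha": <token>} and receives stats, errors and success notes as JSON objects. For illustration, a minimal client sketch under the assumption that the faucet is serving on localhost:8080; both the host and the gist URL are hypothetical placeholders:

package main

import (
    "fmt"

    "golang.org/x/net/websocket"
)

// Connect to the faucet's /api endpoint, submit one funding request and
// print whatever the server streams back.
func main() {
    conn, err := websocket.Dial("ws://localhost:8080/api", "", "http://localhost/")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer conn.Close()

    // Ask for funds using the message shape expected by apiHandler.
    req := map[string]string{
        "url":     "https://gist.github.com/someuser/0123456789abcdef", // hypothetical gist
        "captcha": "",                                                  // empty when captcha is disabled
    }
    if err := websocket.JSON.Send(conn, req); err != nil {
        fmt.Println(err)
        return
    }
    // Read responses until the connection drops.
    for {
        var msg map[string]interface{}
        if err := websocket.JSON.Receive(conn, &msg); err != nil {
            return
        }
        fmt.Println(msg)
    }
}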
@ -0,0 +1,145 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">

    <title>{{.Network}}: GitHub Faucet</title>

    <link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" />
    <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" />

    <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-noty/2.4.1/packaged/jquery.noty.packaged.min.js"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.18.0/moment.min.js"></script>

    <style>
      .vertical-center {
        min-height: 100%;
        min-height: 100vh;
        display: flex;
        align-items: center;
      }
      .progress {
        position: relative;
      }
      .progress span {
        position: absolute;
        display: block;
        width: 100%;
        color: white;
      }
      pre {
        padding: 6px;
        margin: 0;
      }
    </style>
  </head>

  <body>
    <div class="vertical-center">
      <div class="container">
        <div class="row" style="margin-bottom: 16px;">
          <div class="col-lg-12">
            <h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} GitHub Authenticated Faucet <i class="fa fa-github-alt" aria-hidden="true"></i></h1>
          </div>
        </div>
        <div class="row">
          <div class="col-lg-8 col-lg-offset-2">
            <div class="input-group">
              <input id="gist" type="text" class="form-control" placeholder="GitHub Gist URL containing your Ethereum address...">
              <span class="input-group-btn">
                <button class="btn btn-default" type="button" onclick="{{if .Recaptcha}}grecaptcha.execute(){{else}}submit(){{end}}">Give me Ether!</button>
              </span>
            </div>{{if .Recaptcha}}
            <div class="g-recaptcha" data-sitekey="{{.Recaptcha}}" data-callback="submit" data-size="invisible"></div>{{end}}
          </div>
        </div>
        <div class="row" style="margin-top: 32px;">
          <div class="col-lg-6 col-lg-offset-3">
            <div class="panel panel-small panel-default">
              <div class="panel-body" style="padding: 0; overflow: auto; max-height: 300px;">
                <table id="requests" class="table table-condensed" style="margin: 0;"></table>
              </div>
              <div class="panel-footer">
                <table style="width: 100%"><tr>
                  <td style="text-align: center;"><i class="fa fa-rss" aria-hidden="true"></i> <span id="peers"></span> peers</td>
                  <td style="text-align: center;"><i class="fa fa-database" aria-hidden="true"></i> <span id="block"></span> blocks</td>
                  <td style="text-align: center;"><i class="fa fa-heartbeat" aria-hidden="true"></i> <span id="funds"></span> Ethers</td>
                  <td style="text-align: center;"><i class="fa fa-university" aria-hidden="true"></i> <span id="funded"></span> funded</td>
                </tr></table>
              </div>
            </div>
          </div>
        </div>
        <div class="row" style="margin-top: 32px;">
          <div class="col-lg-12">
            <h3>How does this work?</h3>
            <p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long-running spam attacks, requests are tied to GitHub accounts. Anyone having a GitHub account may request funds within the permitted limit of <strong>{{.Amount}} Ether(s) / {{.Period}}</strong>.{{if .Recaptcha}} The faucet is running invisible reCaptcha protection against bots.{{end}}</p>
            <p>To request funds, simply create a <a href="https://gist.github.com/" target="_about:blank">GitHub Gist</a> with your Ethereum address pasted into the contents (the file name doesn't matter), copy-paste the gist's URL into the above input box and fire away! You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
          </div>
        </div>
      </div>
    </div>

    <script>
      // Global variables to hold the current status of the faucet
      var attempt = 0;
      var server;

      // Define the function that submits a gist url to the server
      var submit = function({{if .Recaptcha}}captcha{{end}}) {
        server.send(JSON.stringify({url: $("#gist")[0].value{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
        grecaptcha.reset();{{end}}
      };
      // Define a method to reconnect upon server loss
      var reconnect = function() {
        if (attempt % 2 == 0) {
          server = new WebSocket("wss://" + location.host + "/api");
        } else {
          server = new WebSocket("ws://" + location.host + "/api");
        }
        attempt++;

        server.onmessage = function(event) {
          var msg = JSON.parse(event.data);
          if (msg === null) {
            return;
          }

          if (msg.funds !== undefined) {
            $("#funds").text(msg.funds);
          }
          if (msg.funded !== undefined) {
            $("#funded").text(msg.funded);
          }
          if (msg.peers !== undefined) {
            $("#peers").text(msg.peers);
          }
          if (msg.number !== undefined) {
            $("#block").text(parseInt(msg.number, 16));
          }
          if (msg.error !== undefined) {
            noty({layout: 'topCenter', text: msg.error, type: 'error'});
          }
          if (msg.success !== undefined) {
            noty({layout: 'topCenter', text: msg.success, type: 'success'});
          }
          if (msg.requests !== undefined && msg.requests !== null) {
            var content = "";
            for (var i=0; i<msg.requests.length; i++) {
              content += "<tr><td><div style=\"background: url('https://github.com/" + msg.requests[i].username + ".png?size=64'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td><td><pre>" + msg.requests[i].account + "</pre></td><td style=\"width: 100%; text-align: center; vertical-align: middle;\">" + moment.duration(moment(msg.requests[i].time).unix()-moment().unix(), 'seconds').humanize(true) + "</td></tr>";
            }
            $("#requests").html("<tbody>" + content + "</tbody>");
          }
        }
        server.onclose = function() { setTimeout(reconnect, 3000); };
      }
      // Establish a websocket connection to the API server
      reconnect();
    </script>{{if .Recaptcha}}
    <script src="https://www.google.com/recaptcha/api.js" async defer></script>{{end}}
  </body>
</html>
File diff suppressed because one or more lines are too long.
Some files were not shown because too many files have changed in this diff.