mirror of https://github.com/status-im/op-geth.git
Merge pull request #2242 from jimenezrick/upstream-crypto
Closes #2241: Use Keccak-256 from golang.org/x/crypto/sha3 and mention explicitly
This commit is contained in:
commit
483feb0d3f
|
@ -244,7 +244,7 @@ func TestMethodSignature(t *testing.T) {
|
|||
t.Error("signature mismatch", exp, "!=", m.Sig())
|
||||
}
|
||||
|
||||
idexp := crypto.Sha3([]byte(exp))[:4]
|
||||
idexp := crypto.Keccak256([]byte(exp))[:4]
|
||||
if !bytes.Equal(m.Id(), idexp) {
|
||||
t.Errorf("expected ids to match %x != %x", m.Id(), idexp)
|
||||
}
|
||||
|
@ -264,7 +264,7 @@ func TestPack(t *testing.T) {
|
|||
t.FailNow()
|
||||
}
|
||||
|
||||
sig := crypto.Sha3([]byte("foo(uint32)"))[:4]
|
||||
sig := crypto.Keccak256([]byte("foo(uint32)"))[:4]
|
||||
sig = append(sig, make([]byte, 32)...)
|
||||
sig[35] = 10
|
||||
|
||||
|
@ -286,7 +286,7 @@ func TestMultiPack(t *testing.T) {
|
|||
t.FailNow()
|
||||
}
|
||||
|
||||
sig := crypto.Sha3([]byte("bar(uint32,uint16)"))[:4]
|
||||
sig := crypto.Keccak256([]byte("bar(uint32,uint16)"))[:4]
|
||||
sig = append(sig, make([]byte, 64)...)
|
||||
sig[35] = 10
|
||||
sig[67] = 11
|
||||
|
@ -309,7 +309,7 @@ func TestPackSlice(t *testing.T) {
|
|||
t.FailNow()
|
||||
}
|
||||
|
||||
sig := crypto.Sha3([]byte("slice(uint32[2])"))[:4]
|
||||
sig := crypto.Keccak256([]byte("slice(uint32[2])"))[:4]
|
||||
sig = append(sig, make([]byte, 64)...)
|
||||
sig[35] = 1
|
||||
sig[67] = 2
|
||||
|
@ -332,7 +332,7 @@ func TestPackSliceBig(t *testing.T) {
|
|||
t.FailNow()
|
||||
}
|
||||
|
||||
sig := crypto.Sha3([]byte("slice256(uint256[2])"))[:4]
|
||||
sig := crypto.Keccak256([]byte("slice256(uint256[2])"))[:4]
|
||||
sig = append(sig, make([]byte, 64)...)
|
||||
sig[35] = 1
|
||||
sig[67] = 2
|
||||
|
|
|
@ -40,5 +40,5 @@ func (e Event) Id() common.Hash {
|
|||
types[i] = input.Type.String()
|
||||
i++
|
||||
}
|
||||
return common.BytesToHash(crypto.Sha3([]byte(fmt.Sprintf("%v(%v)", e.Name, strings.Join(types, ",")))))
|
||||
return common.BytesToHash(crypto.Keccak256([]byte(fmt.Sprintf("%v(%v)", e.Name, strings.Join(types, ",")))))
|
||||
}
|
||||
|
|
|
@ -19,8 +19,8 @@ func TestEventId(t *testing.T) {
|
|||
{ "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
|
||||
]`,
|
||||
expectations: map[string]common.Hash{
|
||||
"balance": crypto.Sha3Hash([]byte("balance(uint256)")),
|
||||
"check": crypto.Sha3Hash([]byte("check(address,uint256)")),
|
||||
"balance": crypto.Keccak256Hash([]byte("balance(uint256)")),
|
||||
"check": crypto.Keccak256Hash([]byte("check(address,uint256)")),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -72,5 +72,5 @@ func (m Method) String() string {
|
|||
}
|
||||
|
||||
func (m Method) Id() []byte {
|
||||
return crypto.Sha3([]byte(m.Sig()))[:4]
|
||||
return crypto.Keccak256([]byte(m.Sig()))[:4]
|
||||
}
|
||||
|
|
|
@ -396,7 +396,7 @@ multiply7 = Multiply7.at(contractaddress);
|
|||
if sol != nil && solcVersion != sol.Version() {
|
||||
modContractInfo := versionRE.ReplaceAll(contractInfo, []byte(`"compilerVersion":"`+sol.Version()+`"`))
|
||||
fmt.Printf("modified contractinfo:\n%s\n", modContractInfo)
|
||||
contentHash = `"` + common.ToHex(crypto.Sha3([]byte(modContractInfo))) + `"`
|
||||
contentHash = `"` + common.ToHex(crypto.Keccak256([]byte(modContractInfo))) + `"`
|
||||
}
|
||||
if checkEvalJSON(t, repl, `filename = "/tmp/info.json"`, `"/tmp/info.json"`) != nil {
|
||||
return
|
||||
|
|
|
@ -48,22 +48,6 @@ func FromHex(s string) []byte {
|
|||
return nil
|
||||
}
|
||||
|
||||
type Bytes []byte
|
||||
|
||||
func (self Bytes) String() string {
|
||||
return string(self)
|
||||
}
|
||||
|
||||
func DeleteFromByteSlice(s [][]byte, hash []byte) [][]byte {
|
||||
for i, h := range s {
|
||||
if bytes.Compare(h, hash) == 0 {
|
||||
return append(s[:i:i], s[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Number to bytes
|
||||
//
|
||||
// Returns the number in bytes with the specified base
|
||||
|
@ -154,7 +138,6 @@ func Hex2Bytes(str string) []byte {
|
|||
}
|
||||
|
||||
func Hex2BytesFixed(str string, flen int) []byte {
|
||||
|
||||
h, _ := hex.DecodeString(str)
|
||||
if len(h) == flen {
|
||||
return h
|
||||
|
@ -167,7 +150,6 @@ func Hex2BytesFixed(str string, flen int) []byte {
|
|||
return hh
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func StringToByteFunc(str string, cb func(str string) []byte) (ret []byte) {
|
||||
|
|
|
@ -27,26 +27,6 @@ type BytesSuite struct{}
|
|||
|
||||
var _ = checker.Suite(&BytesSuite{})
|
||||
|
||||
func (s *BytesSuite) TestByteString(c *checker.C) {
|
||||
var data Bytes
|
||||
data = []byte{102, 111, 111}
|
||||
exp := "foo"
|
||||
res := data.String()
|
||||
|
||||
c.Assert(res, checker.Equals, exp)
|
||||
}
|
||||
|
||||
/*
|
||||
func (s *BytesSuite) TestDeleteFromByteSlice(c *checker.C) {
|
||||
data := []byte{1, 2, 3, 4}
|
||||
slice := []byte{1, 2, 3, 4}
|
||||
exp := []byte{1, 4}
|
||||
res := DeleteFromByteSlice(data, slice)
|
||||
|
||||
c.Assert(res, checker.DeepEquals, exp)
|
||||
}
|
||||
|
||||
*/
|
||||
func (s *BytesSuite) TestNumberToBytes(c *checker.C) {
|
||||
// data1 := int(1)
|
||||
// res1 := NumberToBytes(data1, 16)
|
||||
|
|
|
@ -220,7 +220,7 @@ func SaveInfo(info *ContractInfo, filename string) (contenthash common.Hash, err
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
contenthash = common.BytesToHash(crypto.Sha3(infojson))
|
||||
contenthash = common.BytesToHash(crypto.Keccak256(infojson))
|
||||
err = ioutil.WriteFile(filename, infojson, 0600)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -74,7 +74,7 @@ func (self *HTTPClient) GetAuthContent(uri string, hash common.Hash) ([]byte, er
|
|||
}
|
||||
|
||||
// check hash to authenticate content
|
||||
chash := crypto.Sha3Hash(content)
|
||||
chash := crypto.Keccak256Hash(content)
|
||||
if chash != hash {
|
||||
return nil, fmt.Errorf("content hash mismatch %x != %x (exp)", hash[:], chash[:])
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ func TestGetAuthContent(t *testing.T) {
|
|||
client := New(dir)
|
||||
|
||||
text := "test"
|
||||
hash := crypto.Sha3Hash([]byte(text))
|
||||
hash := crypto.Keccak256Hash([]byte(text))
|
||||
if err := ioutil.WriteFile(path.Join(dir, "test.content"), []byte(text), os.ModePerm); err != nil {
|
||||
t.Fatal("could not write test file", err)
|
||||
}
|
||||
|
|
|
@ -115,7 +115,7 @@ func FetchDocsForContract(contractAddress string, xeth *xeth.XEth, client *httpc
|
|||
err = fmt.Errorf("contract (%v) not found", contractAddress)
|
||||
return
|
||||
}
|
||||
codehash := common.BytesToHash(crypto.Sha3(codeb))
|
||||
codehash := common.BytesToHash(crypto.Keccak256(codeb))
|
||||
// set up nameresolver with natspecreg + urlhint contract addresses
|
||||
reg := registrar.New(xeth)
|
||||
|
||||
|
@ -197,7 +197,7 @@ type userDoc struct {
|
|||
func (self *NatSpec) makeAbi2method(abiKey [8]byte) (meth *method) {
|
||||
for signature, m := range self.userDoc.Methods {
|
||||
name := strings.Split(signature, "(")[0]
|
||||
hash := []byte(common.Bytes2Hex(crypto.Sha3([]byte(signature))))
|
||||
hash := []byte(common.Bytes2Hex(crypto.Keccak256([]byte(signature))))
|
||||
var key [8]byte
|
||||
copy(key[:], hash[:8])
|
||||
if bytes.Equal(key[:], abiKey[:]) {
|
||||
|
|
|
@ -238,11 +238,11 @@ func TestNatspecE2E(t *testing.T) {
|
|||
// create a contractInfo file (mock cloud-deployed contract metadocs)
|
||||
// incidentally this is the info for the HashReg contract itself
|
||||
ioutil.WriteFile("/tmp/"+testFileName, []byte(testContractInfo), os.ModePerm)
|
||||
dochash := crypto.Sha3Hash([]byte(testContractInfo))
|
||||
dochash := crypto.Keccak256Hash([]byte(testContractInfo))
|
||||
|
||||
// take the codehash for the contract we wanna test
|
||||
codeb := tf.xeth.CodeAtBytes(registrar.HashRegAddr)
|
||||
codehash := crypto.Sha3Hash(codeb)
|
||||
codehash := crypto.Keccak256Hash(codeb)
|
||||
|
||||
reg := registrar.New(tf.xeth)
|
||||
_, err := reg.SetHashToHash(addr, codehash, dochash)
|
||||
|
|
|
@ -1,253 +0,0 @@
|
|||
package natspec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/docserver"
|
||||
"github.com/ethereum/go-ethereum/common/registrar"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
xe "github.com/ethereum/go-ethereum/xeth"
|
||||
)
|
||||
|
||||
const (
|
||||
testBalance = "10000000000000000000"
|
||||
|
||||
testFileName = "long_file_name_for_testing_registration_of_URLs_longer_than_32_bytes.content"
|
||||
|
||||
testNotice = "Register key `utils.toHex(_key)` <- content `utils.toHex(_content)`"
|
||||
|
||||
testExpNotice = "Register key 0xadd1a7d961cff0242089674ec2ef6fca671ab15e1fe80e38859fc815b98d88ab <- content 0xb3a2dea218de5d8bbe6c4645aadbf67b5ab00ecb1a9ec95dbdad6a0eed3e41a7"
|
||||
|
||||
testExpNotice2 = `About to submit transaction (NatSpec notice error: abi key does not match any method): {"params":[{"to":"%s","data": "0x31e12c20"}]}`
|
||||
|
||||
testExpNotice3 = `About to submit transaction (no NatSpec info found for contract: content hash not found for '0x1392c62d05b2d149e22a339c531157ae06b44d39a674cce500064b12b9aeb019'): {"params":[{"to":"%s","data": "0x300a3bbfb3a2dea218de5d8bbe6c4645aadbf67b5ab00ecb1a9ec95dbdad6a0eed3e41a7000000000000000000000000000000000000000000000000000000000000000000000000000000000000000066696c653a2f2f2f746573742e636f6e74656e74"}]}`
|
||||
)
|
||||
|
||||
const (
|
||||
testUserDoc = `
|
||||
{
|
||||
"methods": {
|
||||
"register(uint256,uint256)": {
|
||||
"notice": "` + testNotice + `"
|
||||
}
|
||||
},
|
||||
"invariants": [
|
||||
{ "notice": "" }
|
||||
],
|
||||
"construction": [
|
||||
{ "notice": "" }
|
||||
]
|
||||
}
|
||||
`
|
||||
testAbiDefinition = `
|
||||
[{
|
||||
"name": "register",
|
||||
"constant": false,
|
||||
"type": "function",
|
||||
"inputs": [{
|
||||
"name": "_key",
|
||||
"type": "uint256"
|
||||
}, {
|
||||
"name": "_content",
|
||||
"type": "uint256"
|
||||
}],
|
||||
"outputs": []
|
||||
}]
|
||||
`
|
||||
|
||||
testContractInfo = `
|
||||
{
|
||||
"userDoc": ` + testUserDoc + `,
|
||||
"abiDefinition": ` + testAbiDefinition + `
|
||||
}
|
||||
`
|
||||
)
|
||||
|
||||
type testFrontend struct {
|
||||
t *testing.T
|
||||
ethereum *eth.Ethereum
|
||||
xeth *xe.XEth
|
||||
coinbase common.Address
|
||||
stateDb *state.StateDB
|
||||
txc uint64
|
||||
lastConfirm string
|
||||
wantNatSpec bool
|
||||
}
|
||||
|
||||
func (self *testFrontend) UnlockAccount(acc []byte) bool {
|
||||
self.ethereum.AccountManager().Unlock(common.BytesToAddress(acc), "password")
|
||||
return true
|
||||
}
|
||||
|
||||
func (self *testFrontend) ConfirmTransaction(tx string) bool {
|
||||
if self.wantNatSpec {
|
||||
ds := docserver.New("/tmp/")
|
||||
self.lastConfirm = GetNotice(self.xeth, tx, ds)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func testEth(t *testing.T) (ethereum *eth.Ethereum, err error) {
|
||||
|
||||
os.RemoveAll("/tmp/eth-natspec/")
|
||||
|
||||
err = os.MkdirAll("/tmp/eth-natspec/keystore", os.ModePerm)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// create a testAddress
|
||||
ks := crypto.NewKeyStorePassphrase("/tmp/eth-natspec/keystore", crypto.LightScryptN, crypto.LightScryptP)
|
||||
am := accounts.NewManager(ks)
|
||||
testAccount, err := am.NewAccount("password")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
testAddress := strings.TrimPrefix(testAccount.Address.Hex(), "0x")
|
||||
|
||||
// set up mock genesis with balance on the testAddress
|
||||
core.GenesisAccounts = []byte(`{
|
||||
"` + testAddress + `": {"balance": "` + testBalance + `"}
|
||||
}`)
|
||||
|
||||
// only use minimalistic stack with no networking
|
||||
ethereum, err = eth.New(ð.Config{
|
||||
DataDir: "/tmp/eth-natspec",
|
||||
AccountManager: am,
|
||||
MaxPeers: 0,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func testInit(t *testing.T) (self *testFrontend) {
|
||||
// initialise and start minimal ethereum stack
|
||||
ethereum, err := testEth(t)
|
||||
if err != nil {
|
||||
t.Errorf("error creating ethereum: %v", err)
|
||||
return
|
||||
}
|
||||
err = ethereum.Start()
|
||||
if err != nil {
|
||||
t.Errorf("error starting ethereum: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// mock frontend
|
||||
self = &testFrontend{t: t, ethereum: ethereum}
|
||||
self.xeth = xe.New(ethereum, self)
|
||||
|
||||
addr, _ := ethereum.Etherbase()
|
||||
self.coinbase = addr
|
||||
self.stateDb = self.ethereum.ChainManager().State().Copy()
|
||||
|
||||
// initialise the registry contracts
|
||||
reg := registrar.New(self.xeth)
|
||||
err = reg.SetHashReg("", addr)
|
||||
if err != nil {
|
||||
t.Errorf("error creating HashReg: %v", err)
|
||||
}
|
||||
err = reg.SetUrlHint("", addr)
|
||||
if err != nil {
|
||||
t.Errorf("error creating UrlHint: %v", err)
|
||||
}
|
||||
self.applyTxs()
|
||||
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
// this is needed for transaction to be applied to the state in testing
|
||||
// the heavy lifing is done in XEth.ApplyTestTxs
|
||||
// this is fragile,
|
||||
// and does process leaking since xeth loops cannot quit safely
|
||||
// should be replaced by proper mining with testDAG for easy full integration tests
|
||||
func (self *testFrontend) applyTxs() {
|
||||
self.txc, self.xeth = self.xeth.ApplyTestTxs(self.stateDb, self.coinbase, self.txc)
|
||||
return
|
||||
}
|
||||
|
||||
// end to end test
|
||||
func TestNatspecE2E(t *testing.T) {
|
||||
t.Skip()
|
||||
|
||||
tf := testInit(t)
|
||||
defer tf.ethereum.Stop()
|
||||
|
||||
// create a contractInfo file (mock cloud-deployed contract metadocs)
|
||||
// incidentally this is the info for the registry contract itself
|
||||
ioutil.WriteFile("/tmp/"+testFileName, []byte(testContractInfo), os.ModePerm)
|
||||
dochash := common.BytesToHash(crypto.Sha3([]byte(testContractInfo)))
|
||||
|
||||
// take the codehash for the contract we wanna test
|
||||
// codehex := tf.xeth.CodeAt(registar.HashRegAddr)
|
||||
codeb := tf.xeth.CodeAtBytes(registrar.HashRegAddr)
|
||||
codehash := common.BytesToHash(crypto.Sha3(codeb))
|
||||
|
||||
// use resolver to register codehash->dochash->url
|
||||
// test if globalregistry works
|
||||
// registrar.HashRefAddr = "0x0"
|
||||
// registrar.UrlHintAddr = "0x0"
|
||||
reg := registrar.New(tf.xeth)
|
||||
_, err := reg.SetHashToHash(tf.coinbase, codehash, dochash)
|
||||
if err != nil {
|
||||
t.Errorf("error registering: %v", err)
|
||||
}
|
||||
_, err = reg.SetUrlToHash(tf.coinbase, dochash, "file:///"+testFileName)
|
||||
if err != nil {
|
||||
t.Errorf("error registering: %v", err)
|
||||
}
|
||||
// apply txs to the state
|
||||
tf.applyTxs()
|
||||
|
||||
// NatSpec info for register method of HashReg contract installed
|
||||
// now using the same transactions to check confirm messages
|
||||
|
||||
tf.wantNatSpec = true // this is set so now the backend uses natspec confirmation
|
||||
_, err = reg.SetHashToHash(tf.coinbase, codehash, dochash)
|
||||
if err != nil {
|
||||
t.Errorf("error calling contract registry: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf("GlobalRegistrar: %v, HashReg: %v, UrlHint: %v\n", registrar.GlobalRegistrarAddr, registrar.HashRegAddr, registrar.UrlHintAddr)
|
||||
if tf.lastConfirm != testExpNotice {
|
||||
t.Errorf("Wrong confirm message. expected '%v', got '%v'", testExpNotice, tf.lastConfirm)
|
||||
}
|
||||
|
||||
// test unknown method
|
||||
exp := fmt.Sprintf(testExpNotice2, registrar.HashRegAddr)
|
||||
_, err = reg.SetOwner(tf.coinbase)
|
||||
if err != nil {
|
||||
t.Errorf("error setting owner: %v", err)
|
||||
}
|
||||
|
||||
if tf.lastConfirm != exp {
|
||||
t.Errorf("Wrong confirm message, expected '%v', got '%v'", exp, tf.lastConfirm)
|
||||
}
|
||||
|
||||
// test unknown contract
|
||||
exp = fmt.Sprintf(testExpNotice3, registrar.UrlHintAddr)
|
||||
|
||||
_, err = reg.SetUrlToHash(tf.coinbase, dochash, "file:///test.content")
|
||||
if err != nil {
|
||||
t.Errorf("error registering: %v", err)
|
||||
}
|
||||
|
||||
if tf.lastConfirm != exp {
|
||||
t.Errorf("Wrong confirm message, expected '%v', got '%v'", exp, tf.lastConfirm)
|
||||
}
|
||||
|
||||
}
|
|
@ -86,7 +86,7 @@ func (api *PrivateRegistarAPI) Register(sender common.Address, addr common.Addre
|
|||
}
|
||||
|
||||
codeb := state.GetCode(addr)
|
||||
codeHash := common.BytesToHash(crypto.Sha3(codeb))
|
||||
codeHash := common.BytesToHash(crypto.Keccak256(codeb))
|
||||
contentHash := common.HexToHash(contentHashHex)
|
||||
|
||||
_, err = registrar.New(api.be).SetHashToHash(sender, codeHash, contentHash)
|
||||
|
|
|
@ -68,7 +68,7 @@ const (
|
|||
)
|
||||
|
||||
func abiSignature(s string) string {
|
||||
return common.ToHex(crypto.Sha3([]byte(s))[:4])
|
||||
return common.ToHex(crypto.Keccak256([]byte(s))[:4])
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -401,7 +401,7 @@ func storageMapping(addr, key []byte) []byte {
|
|||
data := make([]byte, 64)
|
||||
copy(data[0:32], key[0:32])
|
||||
copy(data[32:64], addr[0:32])
|
||||
sha := crypto.Sha3(data)
|
||||
sha := crypto.Keccak256(data)
|
||||
return sha
|
||||
}
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ type testBackend struct {
|
|||
var (
|
||||
text = "test"
|
||||
codehash = common.StringToHash("1234")
|
||||
hash = common.BytesToHash(crypto.Sha3([]byte(text)))
|
||||
hash = common.BytesToHash(crypto.Keccak256([]byte(text)))
|
||||
url = "bzz://bzzhash/my/path/contr.act"
|
||||
)
|
||||
|
||||
|
|
|
@ -31,8 +31,8 @@ const (
|
|||
tokenToken = 0xff
|
||||
)
|
||||
|
||||
var empty = crypto.Sha3([]byte(""))
|
||||
var emptyList = crypto.Sha3([]byte{0x80})
|
||||
var empty = crypto.Keccak256([]byte(""))
|
||||
var emptyList = crypto.Keccak256([]byte{0x80})
|
||||
|
||||
func Decompress(dat []byte) ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
|
|
@ -67,8 +67,8 @@ func (s *CompressionRleSuite) TestDecompressSimple(c *checker.C) {
|
|||
// }
|
||||
|
||||
// var exp []byte
|
||||
// exp = append(exp, crypto.Sha3([]byte(""))...)
|
||||
// exp = append(exp, crypto.Sha3([]byte{0x80})...)
|
||||
// exp = append(exp, crypto.Keccak256([]byte(""))...)
|
||||
// exp = append(exp, crypto.Keccak256([]byte{0x80})...)
|
||||
// exp = append(exp, make([]byte, 10)...)
|
||||
|
||||
// if bytes.Compare(res, res) != 0 {
|
||||
|
@ -82,12 +82,12 @@ func (s *CompressionRleSuite) TestDecompressSimple(c *checker.C) {
|
|||
// t.Error("5 * zero", res)
|
||||
// }
|
||||
|
||||
// res = Compress(crypto.Sha3([]byte("")))
|
||||
// res = Compress(crypto.Keccak256([]byte("")))
|
||||
// if bytes.Compare(res, []byte{token, emptyShaToken}) != 0 {
|
||||
// t.Error("empty sha", res)
|
||||
// }
|
||||
|
||||
// res = Compress(crypto.Sha3([]byte{0x80}))
|
||||
// res = Compress(crypto.Keccak256([]byte{0x80}))
|
||||
// if bytes.Compare(res, []byte{token, emptyListShaToken}) != 0 {
|
||||
// t.Error("empty list sha", res)
|
||||
// }
|
||||
|
@ -100,8 +100,8 @@ func (s *CompressionRleSuite) TestDecompressSimple(c *checker.C) {
|
|||
|
||||
// func TestCompressMulti(t *testing.T) {
|
||||
// in := []byte{0, 0, 0, 0, 0}
|
||||
// in = append(in, crypto.Sha3([]byte(""))...)
|
||||
// in = append(in, crypto.Sha3([]byte{0x80})...)
|
||||
// in = append(in, crypto.Keccak256([]byte(""))...)
|
||||
// in = append(in, crypto.Keccak256([]byte{0x80})...)
|
||||
// in = append(in, token)
|
||||
// res := Compress(in)
|
||||
|
||||
|
@ -116,8 +116,8 @@ func (s *CompressionRleSuite) TestDecompressSimple(c *checker.C) {
|
|||
|
||||
// for i := 0; i < 20; i++ {
|
||||
// in = append(in, []byte{0, 0, 0, 0, 0}...)
|
||||
// in = append(in, crypto.Sha3([]byte(""))...)
|
||||
// in = append(in, crypto.Sha3([]byte{0x80})...)
|
||||
// in = append(in, crypto.Keccak256([]byte(""))...)
|
||||
// in = append(in, crypto.Keccak256([]byte{0x80})...)
|
||||
// in = append(in, []byte{123, 2, 19, 89, 245, 254, 255, token, 98, 233}...)
|
||||
// in = append(in, token)
|
||||
// }
|
||||
|
|
|
@ -30,7 +30,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
var emptyCodeHash = crypto.Sha3(nil)
|
||||
var emptyCodeHash = crypto.Keccak256(nil)
|
||||
|
||||
type Code []byte
|
||||
|
||||
|
@ -225,7 +225,7 @@ func (self *StateObject) Code() []byte {
|
|||
|
||||
func (self *StateObject) SetCode(code []byte) {
|
||||
self.code = code
|
||||
self.codeHash = crypto.Sha3(code)
|
||||
self.codeHash = crypto.Keccak256(code)
|
||||
self.dirty = true
|
||||
}
|
||||
|
||||
|
|
|
@ -101,7 +101,7 @@ func LogsBloom(logs vm.Logs) *big.Int {
|
|||
}
|
||||
|
||||
func bloom9(b []byte) *big.Int {
|
||||
b = crypto.Sha3(b[:])
|
||||
b = crypto.Keccak256(b[:])
|
||||
|
||||
r := new(big.Int)
|
||||
|
||||
|
|
|
@ -73,7 +73,7 @@ func TestBloom9(t *testing.T) {
|
|||
func TestAddress(t *testing.T) {
|
||||
block := &Block{}
|
||||
block.Coinbase = common.Hex2Bytes("22341ae42d6dd7384bc8584e50419ea3ac75b83f")
|
||||
fmt.Printf("%x\n", crypto.Sha3(block.Coinbase))
|
||||
fmt.Printf("%x\n", crypto.Keccak256(block.Coinbase))
|
||||
|
||||
bin := CreateBloom(block)
|
||||
fmt.Printf("bin = %x\n", common.LeftPadBytes(bin, 64))
|
||||
|
|
|
@ -202,7 +202,7 @@ func doFrom(tx *Transaction, homestead bool) (common.Address, error) {
|
|||
return common.Address{}, err
|
||||
}
|
||||
var addr common.Address
|
||||
copy(addr[:], crypto.Sha3(pubkey[1:])[12:])
|
||||
copy(addr[:], crypto.Keccak256(pubkey[1:])[12:])
|
||||
tx.from.Store(addr)
|
||||
return addr, nil
|
||||
}
|
||||
|
|
|
@ -111,7 +111,7 @@ func ecrecoverFunc(in []byte) []byte {
|
|||
}
|
||||
|
||||
// the first byte of pubkey is bitcoin heritage
|
||||
return common.LeftPadBytes(crypto.Sha3(pubKey[1:])[12:], 32)
|
||||
return common.LeftPadBytes(crypto.Keccak256(pubKey[1:])[12:], 32)
|
||||
}
|
||||
|
||||
func memCpy(in []byte) []byte {
|
||||
|
|
|
@ -316,7 +316,7 @@ func opMulmod(instr instruction, pc *uint64, env Environment, contract *Contract
|
|||
|
||||
func opSha3(instr instruction, pc *uint64, env Environment, contract *Contract, memory *Memory, stack *stack) {
|
||||
offset, size := stack.pop(), stack.pop()
|
||||
hash := crypto.Sha3(memory.Get(offset.Int64(), size.Int64()))
|
||||
hash := crypto.Keccak256(memory.Get(offset.Int64(), size.Int64()))
|
||||
|
||||
stack.push(common.BytesToBig(hash))
|
||||
}
|
||||
|
|
|
@ -96,7 +96,7 @@ type Program struct {
|
|||
// NewProgram returns a new JIT program
|
||||
func NewProgram(code []byte) *Program {
|
||||
program := &Program{
|
||||
Id: crypto.Sha3Hash(code),
|
||||
Id: crypto.Keccak256Hash(code),
|
||||
mapping: make(map[uint64]uint64),
|
||||
destinations: make(map[uint64]struct{}),
|
||||
code: code,
|
||||
|
|
|
@ -189,7 +189,7 @@ func (self *Env) Db() Database { return nil }
|
|||
func (self *Env) GasLimit() *big.Int { return self.gasLimit }
|
||||
func (self *Env) VmType() Type { return StdVmTy }
|
||||
func (self *Env) GetHash(n uint64) common.Hash {
|
||||
return common.BytesToHash(crypto.Sha3([]byte(big.NewInt(int64(n)).String())))
|
||||
return common.BytesToHash(crypto.Keccak256([]byte(big.NewInt(int64(n)).String())))
|
||||
}
|
||||
func (self *Env) AddLog(log *Log) {
|
||||
}
|
||||
|
|
|
@ -67,7 +67,7 @@ func setDefaults(cfg *Config) {
|
|||
}
|
||||
if cfg.GetHashFn == nil {
|
||||
cfg.GetHashFn = func(n uint64) common.Hash {
|
||||
return common.BytesToHash(crypto.Sha3([]byte(new(big.Int).SetUint64(n).String())))
|
||||
return common.BytesToHash(crypto.Keccak256([]byte(new(big.Int).SetUint64(n).String())))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
|
|||
}
|
||||
|
||||
var (
|
||||
codehash = crypto.Sha3Hash(contract.Code) // codehash is used when doing jump dest caching
|
||||
codehash = crypto.Keccak256Hash(contract.Code) // codehash is used when doing jump dest caching
|
||||
program *Program
|
||||
)
|
||||
if EnableJit {
|
||||
|
|
|
@ -200,7 +200,7 @@ func (self *JitVm) Run(me, caller ContextRef, code []byte, value, gas, price *bi
|
|||
self.data.timestamp = self.env.Time()
|
||||
self.data.code = getDataPtr(code)
|
||||
self.data.codeSize = uint64(len(code))
|
||||
self.data.codeHash = hash2llvm(crypto.Sha3(code)) // TODO: Get already computed hash?
|
||||
self.data.codeHash = hash2llvm(crypto.Keccak256(code)) // TODO: Get already computed hash?
|
||||
|
||||
jit := C.evmjit_create()
|
||||
retCode := C.evmjit_run(jit, unsafe.Pointer(&self.data), unsafe.Pointer(self))
|
||||
|
@ -242,7 +242,7 @@ func (self *JitVm) Env() Environment {
|
|||
//export env_sha3
|
||||
func env_sha3(dataPtr *byte, length uint64, resultPtr unsafe.Pointer) {
|
||||
data := llvm2bytesRef(dataPtr, length)
|
||||
hash := crypto.Sha3(data)
|
||||
hash := crypto.Keccak256(data)
|
||||
result := (*i256)(resultPtr)
|
||||
*result = hash2llvm(hash)
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ import (
|
|||
"golang.org/x/crypto/ripemd160"
|
||||
)
|
||||
|
||||
func Sha3(data ...[]byte) []byte {
|
||||
func Keccak256(data ...[]byte) []byte {
|
||||
d := sha3.NewKeccak256()
|
||||
for _, b := range data {
|
||||
d.Write(b)
|
||||
|
@ -51,7 +51,7 @@ func Sha3(data ...[]byte) []byte {
|
|||
return d.Sum(nil)
|
||||
}
|
||||
|
||||
func Sha3Hash(data ...[]byte) (h common.Hash) {
|
||||
func Keccak256Hash(data ...[]byte) (h common.Hash) {
|
||||
d := sha3.NewKeccak256()
|
||||
for _, b := range data {
|
||||
d.Write(b)
|
||||
|
@ -60,11 +60,14 @@ func Sha3Hash(data ...[]byte) (h common.Hash) {
|
|||
return h
|
||||
}
|
||||
|
||||
// Deprecated: For backward compatibility as other packages depend on these
|
||||
func Sha3(data ...[]byte) []byte { return Keccak256(data...) }
|
||||
func Sha3Hash(data ...[]byte) common.Hash { return Keccak256Hash(data...) }
|
||||
|
||||
// Creates an ethereum address given the bytes and the nonce
|
||||
func CreateAddress(b common.Address, nonce uint64) common.Address {
|
||||
data, _ := rlp.EncodeToBytes([]interface{}{b, nonce})
|
||||
return common.BytesToAddress(Sha3(data)[12:])
|
||||
//return Sha3(common.NewValue([]interface{}{b, nonce}).Encode())[12:]
|
||||
return common.BytesToAddress(Keccak256(data)[12:])
|
||||
}
|
||||
|
||||
func Sha256(data []byte) []byte {
|
||||
|
@ -265,7 +268,7 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ethPriv := Sha3(plainText)
|
||||
ethPriv := Keccak256(plainText)
|
||||
ecKey := ToECDSA(ethPriv)
|
||||
key = &Key{
|
||||
Id: nil,
|
||||
|
@ -330,7 +333,7 @@ func PKCS7Unpad(in []byte) []byte {
|
|||
|
||||
func PubkeyToAddress(p ecdsa.PublicKey) common.Address {
|
||||
pubBytes := FromECDSAPub(&p)
|
||||
return common.BytesToAddress(Sha3(pubBytes[1:])[12:])
|
||||
return common.BytesToAddress(Keccak256(pubBytes[1:])[12:])
|
||||
}
|
||||
|
||||
func zeroBytes(bytes []byte) {
|
||||
|
|
|
@ -40,13 +40,13 @@ var testPrivHex = "289c2857d4598e37fb9647507e47a309d6133539bf21a8b9cb6df88fd5232
|
|||
func TestSha3(t *testing.T) {
|
||||
msg := []byte("abc")
|
||||
exp, _ := hex.DecodeString("4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45")
|
||||
checkhash(t, "Sha3-256", func(in []byte) []byte { return Sha3(in) }, msg, exp)
|
||||
checkhash(t, "Sha3-256", func(in []byte) []byte { return Keccak256(in) }, msg, exp)
|
||||
}
|
||||
|
||||
func TestSha3Hash(t *testing.T) {
|
||||
msg := []byte("abc")
|
||||
exp, _ := hex.DecodeString("4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45")
|
||||
checkhash(t, "Sha3-256-array", func(in []byte) []byte { h := Sha3Hash(in); return h[:] }, msg, exp)
|
||||
checkhash(t, "Sha3-256-array", func(in []byte) []byte { h := Keccak256Hash(in); return h[:] }, msg, exp)
|
||||
}
|
||||
|
||||
func TestSha256(t *testing.T) {
|
||||
|
@ -66,7 +66,7 @@ func BenchmarkSha3(b *testing.B) {
|
|||
amount := 1000000
|
||||
start := time.Now()
|
||||
for i := 0; i < amount; i++ {
|
||||
Sha3(a)
|
||||
Keccak256(a)
|
||||
}
|
||||
|
||||
fmt.Println(amount, ":", time.Since(start))
|
||||
|
@ -84,7 +84,7 @@ func TestSign(t *testing.T) {
|
|||
key, _ := HexToECDSA(testPrivHex)
|
||||
addr := common.HexToAddress(testAddrHex)
|
||||
|
||||
msg := Sha3([]byte("foo"))
|
||||
msg := Keccak256([]byte("foo"))
|
||||
sig, err := Sign(msg, key)
|
||||
if err != nil {
|
||||
t.Errorf("Sign error: %s", err)
|
||||
|
@ -238,7 +238,7 @@ func TestPythonIntegration(t *testing.T) {
|
|||
k0, _ := HexToECDSA(kh)
|
||||
k1 := FromECDSA(k0)
|
||||
|
||||
msg0 := Sha3([]byte("foo"))
|
||||
msg0 := Keccak256([]byte("foo"))
|
||||
sig0, _ := secp256k1.Sign(msg0, k1)
|
||||
|
||||
msg1 := common.FromHex("00000000000000000000000000000000")
|
||||
|
|
|
@ -110,7 +110,7 @@ func (ks keyStorePassphrase) StoreKey(key *Key, auth string) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
mac := Sha3(derivedKey[16:32], cipherText)
|
||||
mac := Keccak256(derivedKey[16:32], cipherText)
|
||||
|
||||
scryptParamsJSON := make(map[string]interface{}, 5)
|
||||
scryptParamsJSON["n"] = ks.scryptN
|
||||
|
@ -210,7 +210,7 @@ func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byt
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
calculatedMAC := Sha3(derivedKey[16:32], cipherText)
|
||||
calculatedMAC := Keccak256(derivedKey[16:32], cipherText)
|
||||
if !bytes.Equal(calculatedMAC, mac) {
|
||||
return nil, nil, errors.New("Decryption failed: MAC mismatch")
|
||||
}
|
||||
|
@ -244,12 +244,12 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
calculatedMAC := Sha3(derivedKey[16:32], cipherText)
|
||||
calculatedMAC := Keccak256(derivedKey[16:32], cipherText)
|
||||
if !bytes.Equal(calculatedMAC, mac) {
|
||||
return nil, nil, errors.New("Decryption failed: MAC mismatch")
|
||||
}
|
||||
|
||||
plainText, err := aesCBCDecrypt(Sha3(derivedKey[:16])[:16], cipherText, iv)
|
||||
plainText, err := aesCBCDecrypt(Keccak256(derivedKey[:16])[:16], cipherText, iv)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
|
|
@ -19,8 +19,6 @@ package randentropy
|
|||
import (
|
||||
crand "crypto/rand"
|
||||
"io"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
)
|
||||
|
||||
var Reader io.Reader = &randEntropy{}
|
||||
|
@ -34,14 +32,6 @@ func (*randEntropy) Read(bytes []byte) (n int, err error) {
|
|||
return len(bytes), nil
|
||||
}
|
||||
|
||||
// TODO: copied from crypto.go , move to sha3 package?
|
||||
func Sha3(data []byte) []byte {
|
||||
d := sha3.NewKeccak256()
|
||||
d.Write(data)
|
||||
|
||||
return d.Sum(nil)
|
||||
}
|
||||
|
||||
func GetEntropyCSPRNG(n int) []byte {
|
||||
mainBuff := make([]byte, n)
|
||||
_, err := io.ReadFull(crand.Reader, mainBuff)
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,22 @@
|
|||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
|
@ -0,0 +1,66 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
|
||||
// the SHAKE variable-output-length hash functions defined by FIPS-202.
|
||||
//
|
||||
// Both types of hash function use the "sponge" construction and the Keccak
|
||||
// permutation. For a detailed specification see http://keccak.noekeon.org/
|
||||
//
|
||||
//
|
||||
// Guidance
|
||||
//
|
||||
// If you aren't sure what function you need, use SHAKE256 with at least 64
|
||||
// bytes of output. The SHAKE instances are faster than the SHA3 instances;
|
||||
// the latter have to allocate memory to conform to the hash.Hash interface.
|
||||
//
|
||||
// If you need a secret-key MAC (message authentication code), prepend the
|
||||
// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
|
||||
// output.
|
||||
//
|
||||
//
|
||||
// Security strengths
|
||||
//
|
||||
// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
|
||||
// strength against preimage attacks of x bits. Since they only produce "x"
|
||||
// bits of output, their collision-resistance is only "x/2" bits.
|
||||
//
|
||||
// The SHAKE-256 and -128 functions have a generic security strength of 256 and
|
||||
// 128 bits against all attacks, provided that at least 2x bits of their output
|
||||
// is used. Requesting more than 64 or 32 bytes of output, respectively, does
|
||||
// not increase the collision-resistance of the SHAKE functions.
|
||||
//
|
||||
//
|
||||
// The sponge construction
|
||||
//
|
||||
// A sponge builds a pseudo-random function from a public pseudo-random
|
||||
// permutation, by applying the permutation to a state of "rate + capacity"
|
||||
// bytes, but hiding "capacity" of the bytes.
|
||||
//
|
||||
// A sponge starts out with a zero state. To hash an input using a sponge, up
|
||||
// to "rate" bytes of the input are XORed into the sponge's state. The sponge
|
||||
// is then "full" and the permutation is applied to "empty" it. This process is
|
||||
// repeated until all the input has been "absorbed". The input is then padded.
|
||||
// The digest is "squeezed" from the sponge in the same way, except that output
|
||||
// output is copied out instead of input being XORed in.
|
||||
//
|
||||
// A sponge is parameterized by its generic security strength, which is equal
|
||||
// to half its capacity; capacity + rate is equal to the permutation's width.
|
||||
// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
|
||||
// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
|
||||
//
|
||||
//
|
||||
// Recommendations
|
||||
//
|
||||
// The SHAKE functions are recommended for most new uses. They can produce
|
||||
// output of arbitrary length. SHAKE256, with an output length of at least
|
||||
// 64 bytes, provides 256-bit security against all attacks. The Keccak team
|
||||
// recommends it for most applications upgrading from SHA2-512. (NIST chose a
|
||||
// much stronger, but much slower, sponge instance for SHA3-512.)
|
||||
//
|
||||
// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
|
||||
// They produce output of the same length, with the same security strengths
|
||||
// against all attacks. This means, in particular, that SHA3-256 only has
|
||||
// 128-bit collision resistance, because its output length is 32 bytes.
|
||||
package sha3
|
|
@ -0,0 +1,68 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
// This file provides functions for creating instances of the SHA-3
|
||||
// and SHAKE hash functions, as well as utility functions for hashing
|
||||
// bytes.
|
||||
|
||||
import (
|
||||
"hash"
|
||||
)
|
||||
|
||||
// NewKeccak256 creates a new Keccak-256 hash.
|
||||
func NewKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
|
||||
|
||||
// New224 creates a new SHA3-224 hash.
|
||||
// Its generic security strength is 224 bits against preimage attacks,
|
||||
// and 112 bits against collision attacks.
|
||||
func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} }
|
||||
|
||||
// New256 creates a new SHA3-256 hash.
|
||||
// Its generic security strength is 256 bits against preimage attacks,
|
||||
// and 128 bits against collision attacks.
|
||||
func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} }
|
||||
|
||||
// New384 creates a new SHA3-384 hash.
|
||||
// Its generic security strength is 384 bits against preimage attacks,
|
||||
// and 192 bits against collision attacks.
|
||||
func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} }
|
||||
|
||||
// New512 creates a new SHA3-512 hash.
|
||||
// Its generic security strength is 512 bits against preimage attacks,
|
||||
// and 256 bits against collision attacks.
|
||||
func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} }
|
||||
|
||||
// Sum224 returns the SHA3-224 digest of the data.
|
||||
func Sum224(data []byte) (digest [28]byte) {
|
||||
h := New224()
|
||||
h.Write(data)
|
||||
h.Sum(digest[:0])
|
||||
return
|
||||
}
|
||||
|
||||
// Sum256 returns the SHA3-256 digest of the data.
|
||||
func Sum256(data []byte) (digest [32]byte) {
|
||||
h := New256()
|
||||
h.Write(data)
|
||||
h.Sum(digest[:0])
|
||||
return
|
||||
}
|
||||
|
||||
// Sum384 returns the SHA3-384 digest of the data.
|
||||
func Sum384(data []byte) (digest [48]byte) {
|
||||
h := New384()
|
||||
h.Write(data)
|
||||
h.Sum(digest[:0])
|
||||
return
|
||||
}
|
||||
|
||||
// Sum512 returns the SHA3-512 digest of the data.
|
||||
func Sum512(data []byte) (digest [64]byte) {
|
||||
h := New512()
|
||||
h.Write(data)
|
||||
h.Sum(digest[:0])
|
||||
return
|
||||
}
|
|
@ -1,30 +1,6 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.4
|
||||
|
||||
package sha3
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
)
|
||||
|
||||
func init() {
|
||||
crypto.RegisterHash(crypto.SHA3_224, New224)
|
||||
crypto.RegisterHash(crypto.SHA3_256, New256)
|
||||
crypto.RegisterHash(crypto.SHA3_384, New384)
|
||||
crypto.RegisterHash(crypto.SHA3_512, New512)
|
||||
}
|
|
@ -1,237 +1,193 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package sha3 implements the SHA3 hash algorithm (formerly called Keccak) chosen by NIST in 2012.
|
||||
// This file provides a SHA3 implementation which implements the standard hash.Hash interface.
|
||||
// Writing input data, including padding, and reading output data are computed in this file.
|
||||
// Note that the current implementation can compute the hash of an integral number of bytes only.
|
||||
// This is a consequence of the hash interface in which a buffer of bytes is passed in.
|
||||
// The internals of the Keccak-f function are computed in keccakf.go.
|
||||
// For the detailed specification, refer to the Keccak web site (http://keccak.noekeon.org/).
|
||||
package sha3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
// spongeDirection indicates the direction bytes are flowing through the sponge.
|
||||
type spongeDirection int
|
||||
|
||||
const (
|
||||
// spongeAbsorbing indicates that the sponge is absorbing input.
|
||||
spongeAbsorbing spongeDirection = iota
|
||||
// spongeSqueezing indicates that the sponge is being squeezed.
|
||||
spongeSqueezing
|
||||
)
|
||||
|
||||
// laneSize is the size in bytes of each "lane" of the internal state of SHA3 (5 * 5 * 8).
|
||||
// Note that changing this size would requires using a type other than uint64 to store each lane.
|
||||
const laneSize = 8
|
||||
const (
|
||||
// maxRate is the maximum size of the internal buffer. SHAKE-256
|
||||
// currently needs the largest buffer.
|
||||
maxRate = 168
|
||||
)
|
||||
|
||||
// sliceSize represents the dimensions of the internal state, a square matrix of
|
||||
// sliceSize ** 2 lanes. This is the size of both the "rows" and "columns" dimensions in the
|
||||
// terminology of the SHA3 specification.
|
||||
const sliceSize = 5
|
||||
type state struct {
|
||||
// Generic sponge components.
|
||||
a [25]uint64 // main state of the hash
|
||||
buf []byte // points into storage
|
||||
rate int // the number of bytes of state to use
|
||||
|
||||
// numLanes represents the total number of lanes in the state.
|
||||
const numLanes = sliceSize * sliceSize
|
||||
// dsbyte contains the "domain separation" bits and the first bit of
|
||||
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
|
||||
// SHA-3 and SHAKE functions by appending bitstrings to the message.
|
||||
// Using a little-endian bit-ordering convention, these are "01" for SHA-3
|
||||
// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
|
||||
// padding rule from section 5.1 is applied to pad the message to a multiple
|
||||
// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
|
||||
// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
|
||||
// giving 00000110b (0x06) and 00011111b (0x1f).
|
||||
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
|
||||
// "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
|
||||
// Extendable-Output Functions (May 2014)"
|
||||
dsbyte byte
|
||||
storage [maxRate]byte
|
||||
|
||||
// stateSize is the size in bytes of the internal state of SHA3 (5 * 5 * WSize).
|
||||
const stateSize = laneSize * numLanes
|
||||
|
||||
// digest represents the partial evaluation of a checksum.
|
||||
// Note that capacity, and not outputSize, is the critical security parameter, as SHA3 can output
|
||||
// an arbitrary number of bytes for any given capacity. The Keccak proposal recommends that
|
||||
// capacity = 2*outputSize to ensure that finding a collision of size outputSize requires
|
||||
// O(2^{outputSize/2}) computations (the birthday lower bound). Future standards may modify the
|
||||
// capacity/outputSize ratio to allow for more output with lower cryptographic security.
|
||||
type digest struct {
|
||||
a [numLanes]uint64 // main state of the hash
|
||||
outputSize int // desired output size in bytes
|
||||
capacity int // number of bytes to leave untouched during squeeze/absorb
|
||||
absorbed int // number of bytes absorbed thus far
|
||||
// Specific to SHA-3 and SHAKE.
|
||||
fixedOutput bool // whether this is a fixed-ouput-length instance
|
||||
outputLen int // the default output size in bytes
|
||||
state spongeDirection // whether the sponge is absorbing or squeezing
|
||||
}
|
||||
|
||||
// minInt returns the lesser of two integer arguments, to simplify the absorption routine.
|
||||
func minInt(v1, v2 int) int {
|
||||
if v1 <= v2 {
|
||||
return v1
|
||||
}
|
||||
return v2
|
||||
}
|
||||
// BlockSize returns the rate of sponge underlying this hash function.
|
||||
func (d *state) BlockSize() int { return d.rate }
|
||||
|
||||
// rate returns the number of bytes of the internal state which can be absorbed or squeezed
|
||||
// in between calls to the permutation function.
|
||||
func (d *digest) rate() int {
|
||||
return stateSize - d.capacity
|
||||
}
|
||||
// Size returns the output size of the hash function in bytes.
|
||||
func (d *state) Size() int { return d.outputLen }
|
||||
|
||||
// Reset clears the internal state by zeroing bytes in the state buffer.
|
||||
// This can be skipped for a newly-created hash state; the default zero-allocated state is correct.
|
||||
func (d *digest) Reset() {
|
||||
d.absorbed = 0
|
||||
// Reset clears the internal state by zeroing the sponge state and
|
||||
// the byte buffer, and setting Sponge.state to absorbing.
|
||||
func (d *state) Reset() {
|
||||
// Zero the permutation's state.
|
||||
for i := range d.a {
|
||||
d.a[i] = 0
|
||||
}
|
||||
d.state = spongeAbsorbing
|
||||
d.buf = d.storage[:0]
|
||||
}
|
||||
|
||||
// BlockSize, required by the hash.Hash interface, does not have a standard intepretation
|
||||
// for a sponge-based construction like SHA3. We return the data rate: the number of bytes which
|
||||
// can be absorbed per invocation of the permutation function. For Merkle-Damgård based hashes
|
||||
// (ie SHA1, SHA2, MD5) the output size of the internal compression function is returned.
|
||||
// We consider this to be roughly equivalent because it represents the number of bytes of output
|
||||
// produced per cryptographic operation.
|
||||
func (d *digest) BlockSize() int { return d.rate() }
|
||||
|
||||
// Size returns the output size of the hash function in bytes.
|
||||
func (d *digest) Size() int {
|
||||
return d.outputSize
|
||||
}
|
||||
|
||||
// unalignedAbsorb is a helper function for Write, which absorbs data that isn't aligned with an
|
||||
// 8-byte lane. This requires shifting the individual bytes into position in a uint64.
|
||||
func (d *digest) unalignedAbsorb(p []byte) {
|
||||
var t uint64
|
||||
for i := len(p) - 1; i >= 0; i-- {
|
||||
t <<= 8
|
||||
t |= uint64(p[i])
|
||||
func (d *state) clone() *state {
|
||||
ret := *d
|
||||
if ret.state == spongeAbsorbing {
|
||||
ret.buf = ret.storage[:len(ret.buf)]
|
||||
} else {
|
||||
ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate]
|
||||
}
|
||||
offset := (d.absorbed) % d.rate()
|
||||
t <<= 8 * uint(offset%laneSize)
|
||||
d.a[offset/laneSize] ^= t
|
||||
d.absorbed += len(p)
|
||||
|
||||
return &ret
|
||||
}
|
||||
|
||||
// Write "absorbs" bytes into the state of the SHA3 hash, updating as needed when the sponge
|
||||
// "fills up" with rate() bytes. Since lanes are stored internally as type uint64, this requires
|
||||
// converting the incoming bytes into uint64s using a little endian interpretation. This
|
||||
// implementation is optimized for large, aligned writes of multiples of 8 bytes (laneSize).
|
||||
// Non-aligned or uneven numbers of bytes require shifting and are slower.
|
||||
func (d *digest) Write(p []byte) (int, error) {
|
||||
// An initial offset is needed if the we aren't absorbing to the first lane initially.
|
||||
offset := d.absorbed % d.rate()
|
||||
toWrite := len(p)
|
||||
// permute applies the KeccakF-1600 permutation. It handles
|
||||
// any input-output buffering.
|
||||
func (d *state) permute() {
|
||||
switch d.state {
|
||||
case spongeAbsorbing:
|
||||
// If we're absorbing, we need to xor the input into the state
|
||||
// before applying the permutation.
|
||||
xorIn(d, d.buf)
|
||||
d.buf = d.storage[:0]
|
||||
keccakF1600(&d.a)
|
||||
case spongeSqueezing:
|
||||
// If we're squeezing, we need to apply the permutatin before
|
||||
// copying more output.
|
||||
keccakF1600(&d.a)
|
||||
d.buf = d.storage[:d.rate]
|
||||
copyOut(d, d.buf)
|
||||
}
|
||||
}
|
||||
|
||||
// The first lane may need to absorb unaligned and/or incomplete data.
|
||||
if (offset%laneSize != 0 || len(p) < 8) && len(p) > 0 {
|
||||
toAbsorb := minInt(laneSize-(offset%laneSize), len(p))
|
||||
d.unalignedAbsorb(p[:toAbsorb])
|
||||
p = p[toAbsorb:]
|
||||
offset = (d.absorbed) % d.rate()
|
||||
// pads appends the domain separation bits in dsbyte, applies
|
||||
// the multi-bitrate 10..1 padding rule, and permutes the state.
|
||||
func (d *state) padAndPermute(dsbyte byte) {
|
||||
if d.buf == nil {
|
||||
d.buf = d.storage[:0]
|
||||
}
|
||||
// Pad with this instance's domain-separator bits. We know that there's
|
||||
// at least one byte of space in d.buf because, if it were full,
|
||||
// permute would have been called to empty it. dsbyte also contains the
|
||||
// first one bit for the padding. See the comment in the state struct.
|
||||
d.buf = append(d.buf, dsbyte)
|
||||
zerosStart := len(d.buf)
|
||||
d.buf = d.storage[:d.rate]
|
||||
for i := zerosStart; i < d.rate; i++ {
|
||||
d.buf[i] = 0
|
||||
}
|
||||
// This adds the final one bit for the padding. Because of the way that
|
||||
// bits are numbered from the LSB upwards, the final bit is the MSB of
|
||||
// the last byte.
|
||||
d.buf[d.rate-1] ^= 0x80
|
||||
// Apply the permutation
|
||||
d.permute()
|
||||
d.state = spongeSqueezing
|
||||
d.buf = d.storage[:d.rate]
|
||||
copyOut(d, d.buf)
|
||||
}
|
||||
|
||||
// For every rate() bytes absorbed, the state must be permuted via the F Function.
|
||||
if (d.absorbed)%d.rate() == 0 {
|
||||
// Write absorbs more data into the hash's state. It produces an error
|
||||
// if more data is written to the ShakeHash after writing
|
||||
func (d *state) Write(p []byte) (written int, err error) {
|
||||
if d.state != spongeAbsorbing {
|
||||
panic("sha3: write to sponge after read")
|
||||
}
|
||||
if d.buf == nil {
|
||||
d.buf = d.storage[:0]
|
||||
}
|
||||
written = len(p)
|
||||
|
||||
for len(p) > 0 {
|
||||
if len(d.buf) == 0 && len(p) >= d.rate {
|
||||
// The fast path; absorb a full "rate" bytes of input and apply the permutation.
|
||||
xorIn(d, p[:d.rate])
|
||||
p = p[d.rate:]
|
||||
keccakF1600(&d.a)
|
||||
} else {
|
||||
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
|
||||
todo := d.rate - len(d.buf)
|
||||
if todo > len(p) {
|
||||
todo = len(p)
|
||||
}
|
||||
d.buf = append(d.buf, p[:todo]...)
|
||||
p = p[todo:]
|
||||
|
||||
// If the sponge is full, apply the permutation.
|
||||
if len(d.buf) == d.rate {
|
||||
d.permute()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This loop should absorb the bulk of the data into full, aligned lanes.
|
||||
// It will call the update function as necessary.
|
||||
for len(p) > 7 {
|
||||
firstLane := offset / laneSize
|
||||
lastLane := minInt(d.rate()/laneSize, firstLane+len(p)/laneSize)
|
||||
|
||||
// This inner loop absorbs input bytes into the state in groups of 8, converted to uint64s.
|
||||
for lane := firstLane; lane < lastLane; lane++ {
|
||||
d.a[lane] ^= binary.LittleEndian.Uint64(p[:laneSize])
|
||||
p = p[laneSize:]
|
||||
}
|
||||
d.absorbed += (lastLane - firstLane) * laneSize
|
||||
// For every rate() bytes absorbed, the state must be permuted via the F Function.
|
||||
if (d.absorbed)%d.rate() == 0 {
|
||||
keccakF1600(&d.a)
|
||||
}
|
||||
|
||||
offset = 0
|
||||
}
|
||||
|
||||
// If there are insufficient bytes to fill the final lane, an unaligned absorption.
|
||||
// This should always start at a correct lane boundary though, or else it would be caught
|
||||
// by the uneven opening lane case above.
|
||||
if len(p) > 0 {
|
||||
d.unalignedAbsorb(p)
|
||||
}
|
||||
|
||||
return toWrite, nil
|
||||
return
|
||||
}
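As a usage note (not part of the diff): because Write buffers partial blocks and only permutes on full rate-sized blocks, splitting the input across many Write calls yields the same digest as a single call. A minimal sketch, assuming the package is importable under its upstream path golang.org/x/crypto/sha3:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	one := sha3.New256()
	one.Write([]byte("hello world"))

	two := sha3.New256()
	two.Write([]byte("hello "))
	two.Write([]byte("world")) // buffered until a full rate-sized block is available

	fmt.Println(bytes.Equal(one.Sum(nil), two.Sum(nil))) // true
}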
|
||||
|
||||
// pad computes the SHA3 padding scheme based on the number of bytes absorbed.
|
||||
// The padding is a 1 bit, followed by an arbitrary number of 0s and then a final 1 bit, such that
|
||||
// the input bits plus padding bits are a multiple of rate(). Adding the padding simply requires
|
||||
// xoring an opening and closing bit into the appropriate lanes.
|
||||
func (d *digest) pad() {
|
||||
offset := d.absorbed % d.rate()
|
||||
// The opening pad bit must be shifted into position based on the number of bytes absorbed
|
||||
padOpenLane := offset / laneSize
|
||||
d.a[padOpenLane] ^= 0x0000000000000001 << uint(8*(offset%laneSize))
|
||||
// The closing padding bit is always in the last position
|
||||
padCloseLane := (d.rate() / laneSize) - 1
|
||||
d.a[padCloseLane] ^= 0x8000000000000000
|
||||
}
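A small standalone sketch of the index arithmetic pad() performs (padPositions is an illustrative helper, not part of the package): the opening bit lands in lane offset/laneSize at bit 8*(offset%laneSize), and the closing bit is always the most significant bit of the last lane of the rate.

package main

import "fmt"

const laneSize = 8 // bytes per 64-bit lane

// padPositions mirrors pad(): it returns the lane and bit index of the
// opening padding bit and the lane holding the closing padding bit.
func padPositions(absorbed, rate int) (openLane int, openBit uint, closeLane int) {
	offset := absorbed % rate
	return offset / laneSize, uint(8 * (offset % laneSize)), rate/laneSize - 1
}

func main() {
	lane, bit, last := padPositions(3, 136)
	fmt.Println(lane, bit, last) // 0 24 16: xor 1<<24 into lane 0, 1<<63 into lane 16
}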
|
||||
|
||||
// finalize prepares the hash to output data by padding and one final permutation of the state.
|
||||
func (d *digest) finalize() {
|
||||
d.pad()
|
||||
keccakF1600(&d.a)
|
||||
}
|
||||
|
||||
// squeeze outputs an arbitrary number of bytes from the hash state.
|
||||
// Squeezing can require multiple calls to the F function (one per rate() bytes squeezed),
|
||||
// although this is not the case for standard SHA3 parameters. This implementation only supports
|
||||
// squeezing a single time; subsequent squeezes may lose alignment. Future implementations
|
||||
// may wish to support multiple squeeze calls, for example to support use as a PRNG.
|
||||
func (d *digest) squeeze(in []byte, toSqueeze int) []byte {
|
||||
// Because we read in blocks of laneSize, we need enough room to read
|
||||
// an integral number of lanes
|
||||
needed := toSqueeze + (laneSize-toSqueeze%laneSize)%laneSize
|
||||
if cap(in)-len(in) < needed {
|
||||
newIn := make([]byte, len(in), len(in)+needed)
|
||||
copy(newIn, in)
|
||||
in = newIn
|
||||
// Read squeezes an arbitrary number of bytes from the sponge.
|
||||
func (d *state) Read(out []byte) (n int, err error) {
|
||||
// If we're still absorbing, pad and apply the permutation.
|
||||
if d.state == spongeAbsorbing {
|
||||
d.padAndPermute(d.dsbyte)
|
||||
}
|
||||
out := in[len(in) : len(in)+needed]
|
||||
|
||||
n = len(out)
|
||||
|
||||
// Now, do the squeezing.
|
||||
for len(out) > 0 {
|
||||
for i := 0; i < d.rate() && len(out) > 0; i += laneSize {
|
||||
binary.LittleEndian.PutUint64(out[:], d.a[i/laneSize])
|
||||
out = out[laneSize:]
|
||||
}
|
||||
if len(out) > 0 {
|
||||
keccakF1600(&d.a)
|
||||
n := copy(out, d.buf)
|
||||
d.buf = d.buf[n:]
|
||||
out = out[n:]
|
||||
|
||||
// Apply the permutation if we've squeezed the sponge dry.
|
||||
if len(d.buf) == 0 {
|
||||
d.permute()
|
||||
}
|
||||
}
|
||||
return in[:len(in)+toSqueeze] // Re-slice in case we wrote extra data.
|
||||
|
||||
return
|
||||
}
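To illustrate the squeezing phase (a hedged sketch, assuming the upstream import path golang.org/x/crypto/sha3): reading more output than one rate block forces additional permutations, and the ShakeSum256 helper is equivalent to a Write followed by a single Read of the same length.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("some input")

	// Squeeze 300 bytes, which spans more than two 136-byte rate blocks.
	d := sha3.NewShake256()
	d.Write(msg)
	streamed := make([]byte, 300)
	d.Read(streamed)

	// ShakeSum256 should produce the same 300 bytes in one call.
	oneShot := make([]byte, 300)
	sha3.ShakeSum256(oneShot, msg)

	fmt.Println(bytes.Equal(streamed, oneShot)) // true
}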
|
||||
|
||||
// Sum applies padding to the hash state and then squeezes out the desired number of output bytes.
|
||||
func (d *digest) Sum(in []byte) []byte {
|
||||
// Make a copy of the original hash so that caller can keep writing and summing.
|
||||
dup := *d
|
||||
dup.finalize()
|
||||
return dup.squeeze(in, dup.outputSize)
|
||||
// Sum applies padding to the hash state and then squeezes out the desired
|
||||
// number of output bytes.
|
||||
func (d *state) Sum(in []byte) []byte {
|
||||
// Make a copy of the original hash so that caller can keep writing
|
||||
// and summing.
|
||||
dup := d.clone()
|
||||
hash := make([]byte, dup.outputLen)
|
||||
dup.Read(hash)
|
||||
return append(in, hash...)
|
||||
}
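A usage sketch of the Sum behaviour described above (not from the diff; the upstream import path is assumed): because Sum operates on a copy of the state, calling it does not disturb the running hash, so the caller can keep writing.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.New256()
	h.Write([]byte("part one"))

	first := h.Sum(nil)  // digest of "part one"
	second := h.Sum(nil) // identical, because Sum worked on a copy both times
	fmt.Println(bytes.Equal(first, second)) // true

	h.Write([]byte(" and part two")) // the running state was untouched, so writing continues
	fmt.Println(bytes.Equal(h.Sum(nil), first)) // false
}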
|
||||
|
||||
// The NewKeccakX constructors enable initializing a hash in any of the four recommended sizes
|
||||
// from the Keccak specification, all of which set capacity=2*outputSize. Note that the final
|
||||
// NIST standard for SHA3 may specify different input/output lengths.
|
||||
// The output size is indicated in bits but converted into bytes internally.
|
||||
func NewKeccak224() hash.Hash { return &digest{outputSize: 224 / 8, capacity: 2 * 224 / 8} }
|
||||
func NewKeccak256() hash.Hash { return &digest{outputSize: 256 / 8, capacity: 2 * 256 / 8} }
|
||||
func NewKeccak384() hash.Hash { return &digest{outputSize: 384 / 8, capacity: 2 * 384 / 8} }
|
||||
func NewKeccak512() hash.Hash { return &digest{outputSize: 512 / 8, capacity: 2 * 512 / 8} }
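Note that these legacy constructors keep the original Keccak padding, which is what Ethereum's Keccak256 helpers hash with, while the NIST-standardized SHA-3 uses a different domain-separation byte, so the two families disagree on the same input. A minimal sketch of that difference, assuming the usual import paths for go-ethereum's crypto package and the upstream golang.org/x/crypto/sha3:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"golang.org/x/crypto/sha3"
)

func main() {
	data := []byte("hello")

	keccak := crypto.Keccak256(data) // legacy Keccak-256 padding (dsbyte 0x01)
	nist := sha3.Sum256(data)        // FIPS-202 SHA3-256 padding (dsbyte 0x06)

	fmt.Printf("keccak-256: %x\n", keccak)
	fmt.Printf("sha3-256:   %x\n", nist)
	fmt.Println("equal:", bytes.Equal(keccak, nist[:])) // false
}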
|
||||
|
|
|
@ -0,0 +1,306 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
// Tests include all the ShortMsgKATs provided by the Keccak team at
|
||||
// https://github.com/gvanas/KeccakCodePackage
|
||||
//
|
||||
// They only include the zero-bit case of the bitwise testvectors
|
||||
// published by NIST in the draft of FIPS-202.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"hash"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
testString = "brekeccakkeccak koax koax"
|
||||
katFilename = "testdata/keccakKats.json.deflate"
|
||||
)
|
||||
|
||||
// Internal-use instances of SHAKE used to test against KATs.
|
||||
func newHashShake128() hash.Hash {
|
||||
return &state{rate: 168, dsbyte: 0x1f, outputLen: 512}
|
||||
}
|
||||
func newHashShake256() hash.Hash {
|
||||
return &state{rate: 136, dsbyte: 0x1f, outputLen: 512}
|
||||
}
|
||||
|
||||
// testDigests contains functions returning hash.Hash instances
|
||||
// with output-length equal to the KAT length for both SHA-3 and
|
||||
// SHAKE instances.
|
||||
var testDigests = map[string]func() hash.Hash{
|
||||
"SHA3-224": New224,
|
||||
"SHA3-256": New256,
|
||||
"SHA3-384": New384,
|
||||
"SHA3-512": New512,
|
||||
"SHAKE128": newHashShake128,
|
||||
"SHAKE256": newHashShake256,
|
||||
}
|
||||
|
||||
// testShakes contains functions that return ShakeHash instances for
|
||||
// testing the ShakeHash-specific interface.
|
||||
var testShakes = map[string]func() ShakeHash{
|
||||
"SHAKE128": NewShake128,
|
||||
"SHAKE256": NewShake256,
|
||||
}
|
||||
|
||||
// decodeHex converts a hex-encoded string into a raw byte string.
|
||||
func decodeHex(s string) []byte {
|
||||
b, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// structs used to marshal JSON test-cases.
|
||||
type KeccakKats struct {
|
||||
Kats map[string][]struct {
|
||||
Digest string `json:"digest"`
|
||||
Length int64 `json:"length"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
}
|
||||
|
||||
func testUnalignedAndGeneric(t *testing.T, testf func(impl string)) {
|
||||
xorInOrig, copyOutOrig := xorIn, copyOut
|
||||
xorIn, copyOut = xorInGeneric, copyOutGeneric
|
||||
testf("generic")
|
||||
if xorImplementationUnaligned != "generic" {
|
||||
xorIn, copyOut = xorInUnaligned, copyOutUnaligned
|
||||
testf("unaligned")
|
||||
}
|
||||
xorIn, copyOut = xorInOrig, copyOutOrig
|
||||
}
|
||||
|
||||
// TestKeccakKats tests the SHA-3 and Shake implementations against all the
|
||||
// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
|
||||
// (The testvectors are stored in keccakKats.json.deflate due to their length.)
|
||||
func TestKeccakKats(t *testing.T) {
|
||||
testUnalignedAndGeneric(t, func(impl string) {
|
||||
// Read the KATs.
|
||||
deflated, err := os.Open(katFilename)
|
||||
if err != nil {
|
||||
t.Errorf("error opening %s: %s", katFilename, err)
|
||||
}
|
||||
file := flate.NewReader(deflated)
|
||||
dec := json.NewDecoder(file)
|
||||
var katSet KeccakKats
|
||||
err = dec.Decode(&katSet)
|
||||
if err != nil {
|
||||
t.Errorf("error decoding KATs: %s", err)
|
||||
}
|
||||
|
||||
// Do the KATs.
|
||||
for functionName, kats := range katSet.Kats {
|
||||
d := testDigests[functionName]()
|
||||
for _, kat := range kats {
|
||||
d.Reset()
|
||||
in, err := hex.DecodeString(kat.Message)
|
||||
if err != nil {
|
||||
t.Errorf("error decoding KAT: %s", err)
|
||||
}
|
||||
d.Write(in[:kat.Length/8])
|
||||
got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
|
||||
if got != kat.Digest {
|
||||
t.Errorf("function=%s, implementation=%s, length=%d\nmessage:\n %s\ngot:\n %s\nwanted:\n %s",
|
||||
functionName, impl, kat.Length, kat.Message, got, kat.Digest)
|
||||
t.Logf("wanted %+v", kat)
|
||||
t.FailNow()
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestUnalignedWrite tests writing data in an arbitrary pattern with
|
||||
// small input buffers.
|
||||
func testUnalignedWrite(t *testing.T) {
|
||||
testUnalignedAndGeneric(t, func(impl string) {
|
||||
buf := sequentialBytes(0x10000)
|
||||
for alg, df := range testDigests {
|
||||
d := df()
|
||||
d.Reset()
|
||||
d.Write(buf)
|
||||
want := d.Sum(nil)
|
||||
d.Reset()
|
||||
for i := 0; i < len(buf); {
|
||||
// Cycle through offsets which make a 137 byte sequence.
|
||||
// Because 137 is prime this sequence should exercise all corner cases.
|
||||
offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
|
||||
for _, j := range offsets {
|
||||
if v := len(buf) - i; v < j {
|
||||
j = v
|
||||
}
|
||||
d.Write(buf[i : i+j])
|
||||
i += j
|
||||
}
|
||||
}
|
||||
got := d.Sum(nil)
|
||||
if !bytes.Equal(got, want) {
|
||||
t.Errorf("Unaligned writes, implementation=%s, alg=%s\ngot %q, want %q", impl, alg, got, want)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestAppend checks that appending works when reallocation is necessary.
|
||||
func TestAppend(t *testing.T) {
|
||||
testUnalignedAndGeneric(t, func(impl string) {
|
||||
d := New224()
|
||||
|
||||
for capacity := 2; capacity <= 66; capacity += 64 {
|
||||
// The first time around the loop, Sum will have to reallocate.
|
||||
// The second time, it will not.
|
||||
buf := make([]byte, 2, capacity)
|
||||
d.Reset()
|
||||
d.Write([]byte{0xcc})
|
||||
buf = d.Sum(buf)
|
||||
expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
|
||||
if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
|
||||
t.Errorf("got %s, want %s", got, expected)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestAppendNoRealloc tests that appending works when no reallocation is necessary.
|
||||
func TestAppendNoRealloc(t *testing.T) {
|
||||
testUnalignedAndGeneric(t, func(impl string) {
|
||||
buf := make([]byte, 1, 200)
|
||||
d := New224()
|
||||
d.Write([]byte{0xcc})
|
||||
buf = d.Sum(buf)
|
||||
expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
|
||||
if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
|
||||
t.Errorf("%s: got %s, want %s", impl, got, expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestSqueezing checks that squeezing the full output a single time produces
|
||||
// the same output as repeatedly squeezing the instance.
|
||||
func TestSqueezing(t *testing.T) {
|
||||
testUnalignedAndGeneric(t, func(impl string) {
|
||||
for functionName, newShakeHash := range testShakes {
|
||||
d0 := newShakeHash()
|
||||
d0.Write([]byte(testString))
|
||||
ref := make([]byte, 32)
|
||||
d0.Read(ref)
|
||||
|
||||
d1 := newShakeHash()
|
||||
d1.Write([]byte(testString))
|
||||
var multiple []byte
|
||||
for _ = range ref {
|
||||
one := make([]byte, 1)
|
||||
d1.Read(one)
|
||||
multiple = append(multiple, one...)
|
||||
}
|
||||
if !bytes.Equal(ref, multiple) {
|
||||
t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// sequentialBytes produces a buffer of length size holding the consecutive bytes 0x00, 0x01, ..., used for testing.
|
||||
func sequentialBytes(size int) []byte {
|
||||
result := make([]byte, size)
|
||||
for i := range result {
|
||||
result[i] = byte(i)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// BenchmarkPermutationFunction measures the speed of the permutation function
|
||||
// with no input data.
|
||||
func BenchmarkPermutationFunction(b *testing.B) {
|
||||
b.SetBytes(int64(200))
|
||||
var lanes [25]uint64
|
||||
for i := 0; i < b.N; i++ {
|
||||
keccakF1600(&lanes)
|
||||
}
|
||||
}
|
||||
|
||||
// benchmarkHash tests the speed to hash num buffers of size bytes each.
|
||||
func benchmarkHash(b *testing.B, h hash.Hash, size, num int) {
|
||||
b.StopTimer()
|
||||
h.Reset()
|
||||
data := sequentialBytes(size)
|
||||
b.SetBytes(int64(size * num))
|
||||
b.StartTimer()
|
||||
|
||||
var state []byte
|
||||
for i := 0; i < b.N; i++ {
|
||||
for j := 0; j < num; j++ {
|
||||
h.Write(data)
|
||||
}
|
||||
state = h.Sum(state[:0])
|
||||
}
|
||||
b.StopTimer()
|
||||
h.Reset()
|
||||
}
|
||||
|
||||
// benchmarkShake is specialized to the Shake instances, which don't
|
||||
// require a copy on reading output.
|
||||
func benchmarkShake(b *testing.B, h ShakeHash, size, num int) {
|
||||
b.StopTimer()
|
||||
h.Reset()
|
||||
data := sequentialBytes(size)
|
||||
d := make([]byte, 32)
|
||||
|
||||
b.SetBytes(int64(size * num))
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
h.Reset()
|
||||
for j := 0; j < num; j++ {
|
||||
h.Write(data)
|
||||
}
|
||||
h.Read(d)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkHash(b, New512(), 1350, 1) }
|
||||
func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkHash(b, New384(), 1350, 1) }
|
||||
func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkHash(b, New256(), 1350, 1) }
|
||||
func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkHash(b, New224(), 1350, 1) }
|
||||
|
||||
func BenchmarkShake128_MTU(b *testing.B) { benchmarkShake(b, NewShake128(), 1350, 1) }
|
||||
func BenchmarkShake256_MTU(b *testing.B) { benchmarkShake(b, NewShake256(), 1350, 1) }
|
||||
func BenchmarkShake256_16x(b *testing.B) { benchmarkShake(b, NewShake256(), 16, 1024) }
|
||||
func BenchmarkShake256_1MiB(b *testing.B) { benchmarkShake(b, NewShake256(), 1024, 1024) }
|
||||
|
||||
func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkHash(b, New512(), 1024, 1024) }
|
||||
|
||||
func Example_sum() {
|
||||
buf := []byte("some data to hash")
|
||||
// A hash needs to be 64 bytes long to have 256-bit collision resistance.
|
||||
h := make([]byte, 64)
|
||||
// Compute a 64-byte hash of buf and put it in h.
|
||||
ShakeSum256(h, buf)
|
||||
}
|
||||
|
||||
func Example_mac() {
|
||||
k := []byte("this is a secret key; you should generate a strong random key that's at least 32 bytes long")
|
||||
buf := []byte("and this is some data to authenticate")
|
||||
// A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key.
|
||||
h := make([]byte, 32)
|
||||
d := NewShake256()
|
||||
// Write the key into the hash.
|
||||
d.Write(k)
|
||||
// Now write the data.
|
||||
d.Write(buf)
|
||||
// Read 32 bytes of output from the hash into h.
|
||||
d.Read(h)
|
||||
}
|
|
@ -0,0 +1,60 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
// This file defines the ShakeHash interface, and provides
|
||||
// functions for creating SHAKE instances, as well as utility
|
||||
// functions for hashing bytes to arbitrary-length output.
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// ShakeHash defines the interface to hash functions that
|
||||
// support arbitrary-length output.
|
||||
type ShakeHash interface {
|
||||
// Write absorbs more data into the hash's state. It panics if input is
|
||||
// written to it after output has been read from it.
|
||||
io.Writer
|
||||
|
||||
// Read reads more output from the hash; reading affects the hash's
|
||||
// state. (ShakeHash.Read is thus very different from Hash.Sum)
|
||||
// It never returns an error.
|
||||
io.Reader
|
||||
|
||||
// Clone returns a copy of the ShakeHash in its current state.
|
||||
Clone() ShakeHash
|
||||
|
||||
// Reset resets the ShakeHash to its initial state.
|
||||
Reset()
|
||||
}
|
||||
|
||||
func (d *state) Clone() ShakeHash {
|
||||
return d.clone()
|
||||
}
|
||||
|
||||
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
|
||||
// Its generic security strength is 128 bits against all attacks if at
|
||||
// least 32 bytes of its output are used.
|
||||
func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} }
|
||||
|
||||
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
|
||||
// Its generic security strength is 256 bits against all attacks if
|
||||
// at least 64 bytes of its output are used.
|
||||
func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} }
|
||||
|
||||
// ShakeSum128 writes an arbitrary-length digest of data into hash.
|
||||
func ShakeSum128(hash, data []byte) {
|
||||
h := NewShake128()
|
||||
h.Write(data)
|
||||
h.Read(hash)
|
||||
}
|
||||
|
||||
// ShakeSum256 writes an arbitrary-length digest of data into hash.
|
||||
func ShakeSum256(hash, data []byte) {
|
||||
h := NewShake256()
|
||||
h.Write(data)
|
||||
h.Read(hash)
|
||||
}
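As a brief illustration of what "arbitrary-length digest" means here (hedged sketch, upstream import path assumed): SHAKE is an extendable-output function, so a shorter digest is simply a prefix of a longer one for the same message.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("extendable output")

	short := make([]byte, 32)
	sha3.ShakeSum128(short, msg)

	long := make([]byte, 64)
	sha3.ShakeSum128(long, msg)

	// The 32-byte digest is a prefix of the 64-byte digest for the same input.
	fmt.Println(bytes.Equal(short, long[:32])) // true
}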
|
Binary file not shown.
|
@ -0,0 +1,16 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !amd64,!386 appengine
|
||||
|
||||
package sha3
|
||||
|
||||
var (
|
||||
xorIn = xorInGeneric
|
||||
copyOut = copyOutGeneric
|
||||
xorInUnaligned = xorInGeneric
|
||||
copyOutUnaligned = copyOutGeneric
|
||||
)
|
||||
|
||||
const xorImplementationUnaligned = "generic"
|
|
@ -0,0 +1,28 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// xorInGeneric xors the bytes in buf into the state; it
|
||||
// makes no non-portable assumptions about memory layout
|
||||
// or alignment.
|
||||
func xorInGeneric(d *state, buf []byte) {
|
||||
n := len(buf) / 8
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
a := binary.LittleEndian.Uint64(buf)
|
||||
d.a[i] ^= a
|
||||
buf = buf[8:]
|
||||
}
|
||||
}
|
||||
|
||||
// copyOutGeneric copies uint64s to a byte buffer.
|
||||
func copyOutGeneric(d *state, b []byte) {
|
||||
for i := 0; len(b) >= 8; i++ {
|
||||
binary.LittleEndian.PutUint64(b, d.a[i])
|
||||
b = b[8:]
|
||||
}
|
||||
}
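A self-contained sketch (illustrative only, not part of the package) of the little-endian lane handling that xorInGeneric and copyOutGeneric rely on: each group of eight input bytes is interpreted as one uint64 lane and xored into the state, and lanes are serialized back out the same way.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var state [25]uint64 // the 1600-bit Keccak state viewed as 25 lanes

	buf := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}

	// xor the buffer into the first len(buf)/8 lanes, little-endian,
	// as xorInGeneric does.
	for i := 0; i < len(buf)/8; i++ {
		state[i] ^= binary.LittleEndian.Uint64(buf[8*i:])
	}

	// Serialize the same lanes back out, as copyOutGeneric does; starting
	// from an all-zero state this round-trips the input exactly.
	out := make([]byte, len(buf))
	for i := 0; i < len(out)/8; i++ {
		binary.LittleEndian.PutUint64(out[8*i:], state[i])
	}
	fmt.Println(bytes.Equal(out, buf)) // true
}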
|
|
@ -0,0 +1,58 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64 386
|
||||
// +build !appengine
|
||||
|
||||
package sha3
|
||||
|
||||
import "unsafe"
|
||||
|
||||
func xorInUnaligned(d *state, buf []byte) {
|
||||
bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))
|
||||
n := len(buf)
|
||||
if n >= 72 {
|
||||
d.a[0] ^= bw[0]
|
||||
d.a[1] ^= bw[1]
|
||||
d.a[2] ^= bw[2]
|
||||
d.a[3] ^= bw[3]
|
||||
d.a[4] ^= bw[4]
|
||||
d.a[5] ^= bw[5]
|
||||
d.a[6] ^= bw[6]
|
||||
d.a[7] ^= bw[7]
|
||||
d.a[8] ^= bw[8]
|
||||
}
|
||||
if n >= 104 {
|
||||
d.a[9] ^= bw[9]
|
||||
d.a[10] ^= bw[10]
|
||||
d.a[11] ^= bw[11]
|
||||
d.a[12] ^= bw[12]
|
||||
}
|
||||
if n >= 136 {
|
||||
d.a[13] ^= bw[13]
|
||||
d.a[14] ^= bw[14]
|
||||
d.a[15] ^= bw[15]
|
||||
d.a[16] ^= bw[16]
|
||||
}
|
||||
if n >= 144 {
|
||||
d.a[17] ^= bw[17]
|
||||
}
|
||||
if n >= 168 {
|
||||
d.a[18] ^= bw[18]
|
||||
d.a[19] ^= bw[19]
|
||||
d.a[20] ^= bw[20]
|
||||
}
|
||||
}
|
||||
|
||||
func copyOutUnaligned(d *state, buf []byte) {
|
||||
ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
|
||||
copy(buf, ab[:])
|
||||
}
|
||||
|
||||
var (
|
||||
xorIn = xorInUnaligned
|
||||
copyOut = copyOutUnaligned
|
||||
)
|
||||
|
||||
const xorImplementationUnaligned = "unaligned"
|
|
@ -977,7 +977,7 @@ func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, i
|
|||
process := []trie.SyncResult{}
|
||||
for _, blob := range data {
|
||||
// Skip any state trie entries that were not requested
|
||||
hash := common.BytesToHash(crypto.Sha3(blob))
|
||||
hash := common.BytesToHash(crypto.Keccak256(blob))
|
||||
if _, ok := request.Hashes[hash]; !ok {
|
||||
errs = append(errs, fmt.Errorf("non-requested state data %x", hash))
|
||||
continue
|
||||
|
|
|
@ -481,7 +481,7 @@ func testGetNodeData(t *testing.T, protocol int) {
|
|||
}
|
||||
// Verify that all hashes correspond to the requested data, and reconstruct a state tree
|
||||
for i, want := range hashes {
|
||||
if hash := crypto.Sha3Hash(data[i]); hash != want {
|
||||
if hash := crypto.Keccak256Hash(data[i]); hash != want {
|
||||
fmt.Errorf("data hash mismatch: have %x, want %x", hash, want)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,11 +0,0 @@
|
|||
# ethdb
|
||||
|
||||
The ethdb package contains the ethereum database interfaces
|
||||
|
||||
# Installation
|
||||
|
||||
`go get github.com/ethereum/ethdb-go`
|
||||
|
||||
# Usage
|
||||
|
||||
Todo :-)
|
|
@ -52,7 +52,7 @@ func main() {
|
|||
json.Unmarshal(content, &m)
|
||||
|
||||
filepath := filepath.Join(os.Getenv("GOPATH"), "src", "github.com", "ethereum", "go-ethereum", "params", os.Args[2])
|
||||
output, err := os.OpenFile(filepath, os.O_RDWR|os.O_CREATE, os.ModePerm /*0777*/)
|
||||
output, err := os.OpenFile(filepath, os.O_RDWR|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
fatal("error opening file for writing %v\n", err)
|
||||
}
|
||||
|
|
|
@ -53,7 +53,7 @@ func (req *TrieRequest) StoreResult(db ethdb.Database) {
|
|||
// storeProof stores the new trie nodes obtained from a merkle proof in the database
|
||||
func storeProof(db ethdb.Database, proof []rlp.RawValue) {
|
||||
for _, buf := range proof {
|
||||
hash := crypto.Sha3(buf)
|
||||
hash := crypto.Keccak256(buf)
|
||||
val, _ := db.Get(hash)
|
||||
if val == nil {
|
||||
db.Put(hash, buf)
|
||||
|
@ -78,7 +78,7 @@ func (req *NodeDataRequest) StoreResult(db ethdb.Database) {
|
|||
db.Put(req.hash[:], req.GetData())
|
||||
}
|
||||
|
||||
var sha3_nil = crypto.Sha3Hash(nil)
|
||||
var sha3_nil = crypto.Keccak256Hash(nil)
|
||||
|
||||
// retrieveNodeData tries to retrieve node data with the given hash from the network
|
||||
func retrieveNodeData(ctx context.Context, odr OdrBackend, hash common.Hash) ([]byte, error) {
|
||||
|
|
|
@ -29,7 +29,7 @@ import (
|
|||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var emptyCodeHash = crypto.Sha3(nil)
|
||||
var emptyCodeHash = crypto.Keccak256(nil)
|
||||
|
||||
// Code represents a contract code in binary form
|
||||
type Code []byte
|
||||
|
@ -220,7 +220,7 @@ func (self *StateObject) Code() []byte {
|
|||
// SetCode sets the contract code
|
||||
func (self *StateObject) SetCode(code []byte) {
|
||||
self.code = code
|
||||
self.codeHash = crypto.Sha3(code)
|
||||
self.codeHash = crypto.Keccak256(code)
|
||||
self.dirty = true
|
||||
}
|
||||
|
||||
|
|
|
@ -269,5 +269,5 @@ func (s *PublicWeb3API) ClientVersion() string {
|
|||
// Sha3 applies the ethereum sha3 implementation on the input.
|
||||
// It assumes the input is hex encoded.
|
||||
func (s *PublicWeb3API) Sha3(input string) string {
|
||||
return common.ToHex(crypto.Sha3(common.FromHex(input)))
|
||||
return common.ToHex(crypto.Keccak256(common.FromHex(input)))
|
||||
}
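A hedged sketch of what the renamed helper computes (import paths assumed to be the usual go-ethereum ones): the RPC method hex-decodes its input, hashes it with Keccak-256, and hex-encodes the result.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// "0x68656c6c6f20776f726c64" is the hex encoding of "hello world".
	input := "0x68656c6c6f20776f726c64"
	out := common.ToHex(crypto.Keccak256(common.FromHex(input)))
	fmt.Println(out) // 0x-prefixed Keccak-256 digest of "hello world"
}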
|
||||
|
|
|
@ -188,7 +188,7 @@ func (db *nodeDB) node(id NodeID) *Node {
|
|||
glog.V(logger.Warn).Infof("failed to decode node RLP: %v", err)
|
||||
return nil
|
||||
}
|
||||
node.sha = crypto.Sha3Hash(node.ID[:])
|
||||
node.sha = crypto.Keccak256Hash(node.ID[:])
|
||||
return node
|
||||
}
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ func NewNode(id NodeID, ip net.IP, udpPort, tcpPort uint16) *Node {
|
|||
UDP: udpPort,
|
||||
TCP: tcpPort,
|
||||
ID: id,
|
||||
sha: crypto.Sha3Hash(id[:]),
|
||||
sha: crypto.Keccak256Hash(id[:]),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -195,7 +195,7 @@ func (tab *Table) SetFallbackNodes(nodes []*Node) error {
|
|||
cpy := *n
|
||||
// Recompute cpy.sha because the node might not have been
|
||||
// created by NewNode or ParseNode.
|
||||
cpy.sha = crypto.Sha3Hash(n.ID[:])
|
||||
cpy.sha = crypto.Keccak256Hash(n.ID[:])
|
||||
tab.nursery = append(tab.nursery, &cpy)
|
||||
}
|
||||
tab.mutex.Unlock()
|
||||
|
@ -208,7 +208,7 @@ func (tab *Table) SetFallbackNodes(nodes []*Node) error {
|
|||
func (tab *Table) Resolve(targetID NodeID) *Node {
|
||||
// If the node is present in the local table, no
|
||||
// network interaction is required.
|
||||
hash := crypto.Sha3Hash(targetID[:])
|
||||
hash := crypto.Keccak256Hash(targetID[:])
|
||||
tab.mutex.Lock()
|
||||
cl := tab.closest(hash, 1)
|
||||
tab.mutex.Unlock()
|
||||
|
@ -236,7 +236,7 @@ func (tab *Table) Lookup(targetID NodeID) []*Node {
|
|||
|
||||
func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
|
||||
var (
|
||||
target = crypto.Sha3Hash(targetID[:])
|
||||
target = crypto.Keccak256Hash(targetID[:])
|
||||
asked = make(map[NodeID]bool)
|
||||
seen = make(map[NodeID]bool)
|
||||
reply = make(chan []*Node, alpha)
|
||||
|
|
|
@ -530,12 +530,12 @@ func (*preminedTestnet) ping(toid NodeID, toaddr *net.UDPAddr) error { return ni
|
|||
// various distances to the given target.
|
||||
func (n *preminedTestnet) mine(target NodeID) {
|
||||
n.target = target
|
||||
n.targetSha = crypto.Sha3Hash(n.target[:])
|
||||
n.targetSha = crypto.Keccak256Hash(n.target[:])
|
||||
found := 0
|
||||
for found < bucketSize*10 {
|
||||
k := newkey()
|
||||
id := PubkeyID(&k.PublicKey)
|
||||
sha := crypto.Sha3Hash(id[:])
|
||||
sha := crypto.Keccak256Hash(id[:])
|
||||
ld := logdist(n.targetSha, sha)
|
||||
if len(n.dists[ld]) < bucketSize {
|
||||
n.dists[ld] = append(n.dists[ld], id)
|
||||
|
|
|
@ -466,7 +466,7 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) ([]byte,
|
|||
return nil, err
|
||||
}
|
||||
packet := b.Bytes()
|
||||
sig, err := crypto.Sign(crypto.Sha3(packet[headSize:]), priv)
|
||||
sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
|
||||
if err != nil {
|
||||
glog.V(logger.Error).Infoln("could not sign packet:", err)
|
||||
return nil, err
|
||||
|
@ -475,7 +475,7 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) ([]byte,
|
|||
// add the hash to the front. Note: this doesn't protect the
|
||||
// packet in any way. Our public key will be part of this hash in
|
||||
// the future.
|
||||
copy(packet, crypto.Sha3(packet[macSize:]))
|
||||
copy(packet, crypto.Keccak256(packet[macSize:]))
|
||||
return packet, nil
|
||||
}
|
||||
|
||||
|
@ -527,11 +527,11 @@ func decodePacket(buf []byte) (packet, NodeID, []byte, error) {
|
|||
return nil, NodeID{}, nil, errPacketTooSmall
|
||||
}
|
||||
hash, sig, sigdata := buf[:macSize], buf[macSize:headSize], buf[headSize:]
|
||||
shouldhash := crypto.Sha3(buf[macSize:])
|
||||
shouldhash := crypto.Keccak256(buf[macSize:])
|
||||
if !bytes.Equal(hash, shouldhash) {
|
||||
return nil, NodeID{}, nil, errBadHash
|
||||
}
|
||||
fromID, err := recoverNodeID(crypto.Sha3(buf[headSize:]), sig)
|
||||
fromID, err := recoverNodeID(crypto.Keccak256(buf[headSize:]), sig)
|
||||
if err != nil {
|
||||
return nil, NodeID{}, hash, err
|
||||
}
|
||||
|
@ -593,7 +593,7 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
|
|||
// (which is a much bigger packet than findnode) to the victim.
|
||||
return errUnknownNode
|
||||
}
|
||||
target := crypto.Sha3Hash(req.Target[:])
|
||||
target := crypto.Keccak256Hash(req.Target[:])
|
||||
t.mutex.Lock()
|
||||
closest := t.closest(target, bucketSize).entries
|
||||
t.mutex.Unlock()
|
||||
|
|
|
@ -286,7 +286,7 @@ func TestUDP_findnode(t *testing.T) {
|
|||
// put a few nodes into the table. their exact
|
||||
// distribution shouldn't matter much, although we need to
|
||||
// take care not to overflow any bucket.
|
||||
targetHash := crypto.Sha3Hash(testTarget[:])
|
||||
targetHash := crypto.Keccak256Hash(testTarget[:])
|
||||
nodes := &nodesByDistance{target: targetHash}
|
||||
for i := 0; i < bucketSize; i++ {
|
||||
nodes.push(nodeAtDistance(test.table.self.sha, i+2), bucketSize)
|
||||
|
|
|
@ -232,12 +232,12 @@ func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
|
|||
}
|
||||
|
||||
// derive base secrets from ephemeral key agreement
|
||||
sharedSecret := crypto.Sha3(ecdheSecret, crypto.Sha3(h.respNonce, h.initNonce))
|
||||
aesSecret := crypto.Sha3(ecdheSecret, sharedSecret)
|
||||
sharedSecret := crypto.Keccak256(ecdheSecret, crypto.Keccak256(h.respNonce, h.initNonce))
|
||||
aesSecret := crypto.Keccak256(ecdheSecret, sharedSecret)
|
||||
s := secrets{
|
||||
RemoteID: h.remoteID,
|
||||
AES: aesSecret,
|
||||
MAC: crypto.Sha3(ecdheSecret, aesSecret),
|
||||
MAC: crypto.Keccak256(ecdheSecret, aesSecret),
|
||||
}
|
||||
|
||||
// setup sha3 instances for the MACs
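The derivation above chains Keccak-256 over the ECDHE secret and the handshake nonces; the variadic Keccak256(data ...[]byte) concatenates its arguments before hashing. A standalone sketch of the same chain with dummy stand-in values (illustrative only, import path assumed):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Dummy stand-ins for the ECDHE shared secret and the handshake nonces.
	ecdheSecret := []byte("ecdhe-secret")
	initNonce := []byte("initiator-nonce")
	respNonce := []byte("responder-nonce")

	// Same chain as encHandshake.secrets: each step hashes the ECDHE secret
	// together with the previous value.
	sharedSecret := crypto.Keccak256(ecdheSecret, crypto.Keccak256(respNonce, initNonce))
	aesSecret := crypto.Keccak256(ecdheSecret, sharedSecret)
	macSecret := crypto.Keccak256(ecdheSecret, aesSecret)

	fmt.Printf("aes: %x\nmac: %x\n", aesSecret, macSecret)
}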
|
||||
|
@ -426,7 +426,7 @@ func (h *encHandshake) makeAuthResp() (msg *authRespV4, err error) {
|
|||
func (msg *authMsgV4) sealPlain(h *encHandshake) ([]byte, error) {
|
||||
buf := make([]byte, authMsgLen)
|
||||
n := copy(buf, msg.Signature[:])
|
||||
n += copy(buf[n:], crypto.Sha3(exportPubkey(&h.randomPrivKey.PublicKey)))
|
||||
n += copy(buf[n:], crypto.Keccak256(exportPubkey(&h.randomPrivKey.PublicKey)))
|
||||
n += copy(buf[n:], msg.InitiatorPubkey[:])
|
||||
n += copy(buf[n:], msg.Nonce[:])
|
||||
buf[n] = 0 // token-flag
|
||||
|
|
|
@ -267,8 +267,8 @@ func TestRLPXFrameFake(t *testing.T) {
|
|||
buf := new(bytes.Buffer)
|
||||
hash := fakeHash([]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})
|
||||
rw := newRLPXFrameRW(buf, secrets{
|
||||
AES: crypto.Sha3(),
|
||||
MAC: crypto.Sha3(),
|
||||
AES: crypto.Keccak256(),
|
||||
MAC: crypto.Keccak256(),
|
||||
IngressMAC: hash,
|
||||
EgressMAC: hash,
|
||||
})
|
||||
|
|
|
@ -183,7 +183,7 @@ func (self *Env) Db() vm.Database { return self.state }
|
|||
func (self *Env) GasLimit() *big.Int { return self.gasLimit }
|
||||
func (self *Env) VmType() vm.Type { return vm.StdVmTy }
|
||||
func (self *Env) GetHash(n uint64) common.Hash {
|
||||
return common.BytesToHash(crypto.Sha3([]byte(big.NewInt(int64(n)).String())))
|
||||
return common.BytesToHash(crypto.Keccak256([]byte(big.NewInt(int64(n)).String())))
|
||||
}
|
||||
func (self *Env) AddLog(log *vm.Log) {
|
||||
self.state.AddLog(log)
|
||||
|
|
|
@ -63,7 +63,7 @@ func TestSecureGetKey(t *testing.T) {
|
|||
|
||||
key := []byte("foo")
|
||||
value := []byte("bar")
|
||||
seckey := crypto.Sha3(key)
|
||||
seckey := crypto.Keccak256(key)
|
||||
|
||||
if !bytes.Equal(trie.Get(key), value) {
|
||||
t.Errorf("Get did not return bar")
|
||||
|
|
|
@ -40,7 +40,7 @@ var (
|
|||
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||
|
||||
// This is the known hash of an empty state trie entry.
|
||||
emptyState = crypto.Sha3Hash(nil)
|
||||
emptyState = crypto.Keccak256Hash(nil)
|
||||
)
|
||||
|
||||
// ClearGlobalCache clears the global trie cache
|
||||
|
|
|
@ -66,7 +66,7 @@ func (self *Envelope) Seal(pow time.Duration) {
|
|||
for i := 0; i < 1024; i++ {
|
||||
binary.BigEndian.PutUint32(d[60:], nonce)
|
||||
|
||||
firstBit := common.FirstBitSet(common.BigD(crypto.Sha3(d)))
|
||||
firstBit := common.FirstBitSet(common.BigD(crypto.Keccak256(d)))
|
||||
if firstBit > bestBit {
|
||||
self.Nonce, bestBit = nonce, firstBit
|
||||
}
|
||||
|
@ -123,7 +123,7 @@ func (self *Envelope) Open(key *ecdsa.PrivateKey) (msg *Message, err error) {
|
|||
func (self *Envelope) Hash() common.Hash {
|
||||
if (self.hash == common.Hash{}) {
|
||||
enc, _ := rlp.EncodeToBytes(self)
|
||||
self.hash = crypto.Sha3Hash(enc)
|
||||
self.hash = crypto.Keccak256Hash(enc)
|
||||
}
|
||||
return self.hash
|
||||
}
|
||||
|
@ -142,6 +142,6 @@ func (self *Envelope) DecodeRLP(s *rlp.Stream) error {
|
|||
if err := rlp.DecodeBytes(raw, (*rlpenv)(self)); err != nil {
|
||||
return err
|
||||
}
|
||||
self.hash = crypto.Sha3Hash(raw)
|
||||
self.hash = crypto.Keccak256Hash(raw)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -146,7 +146,7 @@ func (self *Message) decrypt(key *ecdsa.PrivateKey) error {
|
|||
|
||||
// hash calculates the SHA3 checksum of the message flags and payload.
|
||||
func (self *Message) hash() []byte {
|
||||
return crypto.Sha3(append([]byte{self.Flags}, self.Payload...))
|
||||
return crypto.Keccak256(append([]byte{self.Flags}, self.Payload...))
|
||||
}
|
||||
|
||||
// bytes flattens the message contents (flags, signature and payload) into a
|
||||
|
|
|
@ -31,7 +31,7 @@ type Topic [4]byte
|
|||
// Note, empty topics are considered the wildcard, and cannot be used in messages.
|
||||
func NewTopic(data []byte) Topic {
|
||||
prefix := [4]byte{}
|
||||
copy(prefix[:], crypto.Sha3(data)[:4])
|
||||
copy(prefix[:], crypto.Keccak256(data)[:4])
|
||||
return Topic(prefix)
|
||||
}
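A small sketch of the topic construction (import path assumed; newTopic is an illustrative stand-in for the whisper helper): a topic is just the first four bytes of the Keccak-256 hash of the topic data.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// Topic mirrors the whisper type: a 4-byte abbreviation of the full hash.
type Topic [4]byte

func newTopic(data []byte) Topic {
	var t Topic
	copy(t[:], crypto.Keccak256(data)[:4])
	return t
}

func main() {
	fmt.Printf("%x\n", newTopic([]byte("my-topic")))
}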
|
||||
|
||||
|
|