upgrade ethereum

This commit is contained in:
Andrea Maria Piana 2022-10-25 15:25:08 +01:00 committed by Anthony Laibe
parent b11643e66d
commit fc836e4c76
732 changed files with 73144 additions and 35291 deletions

View File

@@ -1,7 +1,7 @@
package api
import (
-signercore "github.com/ethereum/go-ethereum/signer/core"
+signercore "github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/multiaccounts"

View File

@@ -17,7 +17,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
ethcrypto "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
-signercore "github.com/ethereum/go-ethereum/signer/core"
+signercore "github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/status-im/status-go/account"
"github.com/status-im/status-go/appdatabase"

go.mod (15 changed lines)
View File

@@ -2,7 +2,7 @@ module github.com/status-im/status-go
go 1.18
-replace github.com/ethereum/go-ethereum v1.10.21 => github.com/status-im/go-ethereum v1.10.4-status.5
+replace github.com/ethereum/go-ethereum v1.10.25 => github.com/status-im/go-ethereum v1.10.25-status.3
replace github.com/docker/docker => github.com/docker/engine v1.4.2-0.20190717161051-705d9623b7c1
@@ -21,7 +21,7 @@ require (
github.com/cenkalti/backoff/v3 v3.2.2
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set v1.8.0
-github.com/ethereum/go-ethereum v1.10.21
+github.com/ethereum/go-ethereum v1.10.25
github.com/forPelevin/gomoji v1.1.2
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
@@ -117,6 +117,7 @@ require (
github.com/cheekybits/genny v1.0.0 // indirect
github.com/containerd/cgroups v1.0.4 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
+github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cruxic/go-hmac-drbg v0.0.0-20170206035330-84c46983886d // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
@@ -136,6 +137,7 @@ require (
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
+github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
github.com/golang-migrate/migrate/v4 v4.15.2 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -145,6 +147,7 @@ require (
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
+github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
@@ -189,6 +192,8 @@ require (
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
+github.com/mitchellh/mapstructure v1.4.1 // indirect
+github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
github.com/multiformats/go-base32 v0.0.4 // indirect
@@ -235,6 +240,7 @@ require (
github.com/rs/dnscache v0.0.0-20210201191234-295bba877686 // indirect
github.com/russolsen/ohyeah v0.0.0-20160324131710-f4938c005315 // indirect
github.com/russolsen/same v0.0.0-20160222130632-f089df61f51d // indirect
+github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v3.21.5+incompatible // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
@@ -246,9 +252,11 @@ require (
github.com/tklauser/go-sysconf v0.3.6 // indirect
github.com/tklauser/numcpus v0.2.2 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
+github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
@@ -256,7 +264,7 @@ require (
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20220630215102-69896b714898 // indirect
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
+golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
@@ -264,7 +272,6 @@ require (
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
-gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.1.7 // indirect
modernc.org/libc v1.11.82 // indirect

go.sum (21 changed lines)
View File

@@ -632,6 +632,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -733,7 +734,6 @@ github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
-github.com/elastic/gosigar v0.14.1/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
@@ -760,6 +760,7 @@ github.com/ethereum/go-ethereum v1.9.5/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJ
github.com/ethereum/go-ethereum v1.10.4/go.mod h1:nEE0TP5MtxGzOMd7egIrbPJMQBnhVU3ELNxhBglIzhg=
github.com/ethereum/go-ethereum v1.10.16/go.mod h1:Anj6cxczl+AHy63o4X9O8yWNHuN5wMpfb8MAnHkWn7Y=
github.com/ethereum/go-ethereum v1.10.20/go.mod h1:LWUN82TCHGpxB3En5HVmLLzPD7YSrEUFmFfN1nKkVN0=
+github.com/ethereum/go-ethereum v1.10.21/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -922,6 +923,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0=
github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc=
@@ -1095,6 +1097,7 @@ github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FK
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@@ -1121,6 +1124,7 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
@@ -1136,7 +1140,6 @@ github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo=
-github.com/huin/goupnp v1.0.1-0.20210626160114-33cdcbb30dda/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
@@ -1582,8 +1585,10 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
@@ -2003,6 +2008,7 @@ github.com/russolsen/transit v0.0.0-20180705123435-0794b4c4505a h1:yVNJFSzkEG8sm
github.com/russolsen/transit v0.0.0-20180705123435-0794b4c4505a/go.mod h1:TPq+fcJOdGrkpZpXF4UVmFjYxH0gGqnxdgZ+OzAmvJk=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -2103,8 +2109,8 @@ github.com/status-im/doubleratchet v3.0.0+incompatible h1:aJ1ejcSERpSzmWZBgtfYti
github.com/status-im/doubleratchet v3.0.0+incompatible/go.mod h1:1sqR0+yhiM/bd+wrdX79AOt2csZuJOni0nUDzKNuqOU=
github.com/status-im/go-discover v0.0.0-20220406135310-85a2ce36f63e h1:fDm8hqKGFy8LMNV8zedT3W+QYVPVDfb0F9Fr7fVf9rQ=
github.com/status-im/go-discover v0.0.0-20220406135310-85a2ce36f63e/go.mod h1:u1s0ACIlweIjmJrgXyljRPSOflZLaS6ezb044+92W3c=
-github.com/status-im/go-ethereum v1.10.4-status.4 h1:Cs0MoHEhF0LEwii2R8U8jMSEW/gycbHvSJt//KSdP8c=
+github.com/status-im/go-ethereum v1.10.25-status.3 h1:otbW2SCLmNNXAT8uqtrhxq0jRlwQeDA3Tqrhz+LMX3M=
-github.com/status-im/go-ethereum v1.10.4-status.4/go.mod h1:GvIhpdCOgMHI6i5xVPEZOrv/qSMeOFHbZh77AoyZUoE=
+github.com/status-im/go-ethereum v1.10.25-status.3/go.mod h1:Dt4K5JYMhJRdtXJwBEyGZLZn9iz/chSOZyjVmt5ZhwQ=
github.com/status-im/go-multiaddr-ethv4 v1.2.3 h1:EdMt0rCVcue9zQ3TZoUVrKel0HG+RfbuqY94PXd3AWk=
github.com/status-im/go-multiaddr-ethv4 v1.2.3/go.mod h1:xgjMcsI3pNezwNVlUnUmaDLVmtT871/OOqnEUublHKQ=
github.com/status-im/go-waku v0.2.3-0.20221005220914-22c049dfd68b h1:KPuv6Hq4gShubVcN9fIoVqWqTxhTXlLDuGmqRi6ZZFo=
@@ -2205,6 +2211,8 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
+github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
+github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/vacp2p/mvds v0.0.24-0.20201124060106-26d8e94130d8 h1:aSQuY64yglPb7I6lZRXt/xWD4ExM1DZo8Gpb7xCz6zk=
github.com/vacp2p/mvds v0.0.24-0.20201124060106-26d8e94130d8/go.mod h1:uUmtiahU7efOVl/5w5yk9jOze5xYpDZDrSrT8TvHXjQ=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -2254,6 +2262,7 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -2753,8 +2762,9 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43 h1:OK7RB6t2WQX54srQQYSXMW8dF5C6/8+oA/s5QBmmto4=
+golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -3109,7 +3119,6 @@ gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVY
gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=

View File

@@ -11,7 +11,7 @@ import (
validator "gopkg.in/go-playground/validator.v9"
"github.com/ethereum/go-ethereum/log"
-signercore "github.com/ethereum/go-ethereum/signer/core"
+"github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/status-im/zxcvbn-go"
"github.com/status-im/zxcvbn-go/scoring"
@@ -452,7 +452,7 @@ func HashTypedData(data string) string {
// if password matches selected account.
//export SignTypedDataV4
func SignTypedDataV4(data, address, password string) string {
-var typed signercore.TypedData
+var typed apitypes.TypedData
err := json.Unmarshal([]byte(data), &typed)
if err != nil {
return prepareJSONResponseWithCode(nil, err, codeFailedParseParams)
@@ -464,7 +464,7 @@ func SignTypedDataV4(data, address, password string) string {
// HashTypedDataV4 unmarshalls data into TypedData, validates it and hashes it.
//export HashTypedDataV4
func HashTypedDataV4(data string) string {
-var typed signercore.TypedData
+var typed apitypes.TypedData
err := json.Unmarshal([]byte(data), &typed)
if err != nil {
return prepareJSONResponseWithCode(nil, err, codeFailedParseParams)
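
As the hunks above show, the EIP-712 typed-data types are now imported from signer/core/apitypes instead of signer/core, and callers unmarshal into apitypes.TypedData. A minimal sketch of a call site after the move (parseTypedData is a hypothetical helper name used only for illustration; the status-go response helpers from the hunk are omitted):

```go
// Minimal sketch of a post-upgrade call site; parseTypedData is a hypothetical
// helper used only for illustration.
package main

import (
	"encoding/json"
	"fmt"

	// Before this upgrade the TypedData type was imported from
	// "github.com/ethereum/go-ethereum/signer/core".
	"github.com/ethereum/go-ethereum/signer/core/apitypes"
)

// parseTypedData unmarshals a JSON payload into the relocated TypedData type.
func parseTypedData(data string) (apitypes.TypedData, error) {
	var typed apitypes.TypedData
	err := json.Unmarshal([]byte(data), &typed)
	return typed, err
}

func main() {
	typed, err := parseTypedData(`{"types":{},"primaryType":"Person","domain":{},"message":{}}`)
	fmt.Println(typed.PrimaryType, err) // Person <nil>
}
```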

View File

@@ -10,9 +10,8 @@ import (
"github.com/ethereum/go-ethereum/crypto"
+"github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/status-im/status-go/eth-node/types"
-signercore "github.com/ethereum/go-ethereum/signer/core"
)
func TestTypedDataSuite(t *testing.T) {
@@ -23,8 +22,8 @@ type TypedDataSuite struct {
suite.Suite
privateKey *ecdsa.PrivateKey
-typedDataV3 signercore.TypedData
+typedDataV3 apitypes.TypedData
-typedDataV4 signercore.TypedData
+typedDataV4 apitypes.TypedData
}
func (s *TypedDataSuite) SetupTest() {
@@ -49,7 +48,7 @@ func (s *TypedDataSuite) TestTypedDataV4() {
actual := s.typedDataV4.TypeHash("Person")
s.Require().Equal(expected, actual.String())
-fromTypedData := signercore.TypedData{}
+fromTypedData := apitypes.TypedData{}
s.Require().NoError(json.Unmarshal([]byte(fromJSON), &fromTypedData))
actual, err := s.typedDataV4.HashStruct("Person", fromTypedData.Message)

View File

@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
-signercore "github.com/ethereum/go-ethereum/signer/core"
+signercore "github.com/ethereum/go-ethereum/signer/core/apitypes"
)
var (

View File

@@ -121,7 +121,7 @@ func (h *HopBridge) Can(from, to *params.Network, token *token.Token, balance *b
return true, nil
}
-func (s *HopBridge) EstimateGas(from, to *params.Network, token *token.Token, amountIn *big.Int) (uint64, error) {
+func (h *HopBridge) EstimateGas(from, to *params.Network, token *token.Token, amountIn *big.Int) (uint64, error) {
// TODO: find why this doesn't work
// ethClient, err := s.contractMaker.RPCClient.EthClient(from.ChainID)
// if err != nil {
@@ -158,8 +158,8 @@ func (s *HopBridge) EstimateGas(from, to *params.Network, token *token.Token, am
return 500000 + 1000, nil
}
-func (s *HopBridge) Send(sendArgs *TransactionBridge, verifiedAccount *account.SelectedExtKey) (hash types.Hash, err error) {
+func (h *HopBridge) Send(sendArgs *TransactionBridge, verifiedAccount *account.SelectedExtKey) (hash types.Hash, err error) {
-networks, err := s.contractMaker.RPCClient.NetworkManager.Get(false)
+networks, err := h.contractMaker.RPCClient.NetworkManager.Get(false)
if err != nil {
return hash, err
}
@@ -172,9 +172,9 @@ func (s *HopBridge) Send(sendArgs *TransactionBridge, verifiedAccount *account.S
}
if fromNetwork.Layer == 1 {
-return s.sendToL2(sendArgs.ChainID, sendArgs.HopTx, verifiedAccount)
+return h.sendToL2(sendArgs.ChainID, sendArgs.HopTx, verifiedAccount)
}
-return s.swapAndSend(sendArgs.ChainID, sendArgs.HopTx, verifiedAccount)
+return h.swapAndSend(sendArgs.ChainID, sendArgs.HopTx, verifiedAccount)
}
func getSigner(chainID uint64, from types.Address, verifiedAccount *account.SelectedExtKey) bind.SignerFn {
@@ -184,8 +184,8 @@ func getSigner(chainID uint64, from types.Address, verifiedAccount *account.Sele
}
}
-func (s *HopBridge) sendToL2(chainID uint64, sendArgs *HopTxArgs, verifiedAccount *account.SelectedExtKey) (hash types.Hash, err error) {
+func (h *HopBridge) sendToL2(chainID uint64, sendArgs *HopTxArgs, verifiedAccount *account.SelectedExtKey) (hash types.Hash, err error) {
-bridge, err := s.contractMaker.NewHopL1Bridge(chainID, sendArgs.Symbol)
+bridge, err := h.contractMaker.NewHopL1Bridge(chainID, sendArgs.Symbol)
if err != nil {
return hash, err
}
@@ -210,8 +210,8 @@ func (s *HopBridge) sendToL2(chainID uint64, sendArgs *HopTxArgs, verifiedAccoun
return types.Hash(tx.Hash()), nil
}
-func (s *HopBridge) swapAndSend(chainID uint64, sendArgs *HopTxArgs, verifiedAccount *account.SelectedExtKey) (hash types.Hash, err error) {
+func (h *HopBridge) swapAndSend(chainID uint64, sendArgs *HopTxArgs, verifiedAccount *account.SelectedExtKey) (hash types.Hash, err error) {
-ammWrapper, err := s.contractMaker.NewHopL2AmmWrapper(chainID, sendArgs.Symbol)
+ammWrapper, err := h.contractMaker.NewHopL2AmmWrapper(chainID, sendArgs.Symbol)
if err != nil {
return hash, err
}

View File

@@ -51,7 +51,7 @@ func (s SendType) EstimateGas(service *Service, network *params.Network) uint64
from := types.Address(common.HexToAddress("0x5ffa75ce51c3a7ebe23bde37b5e3a0143dfbcee0"))
tx := transactions.SendTxArgs{
From: from,
-Value: (*hexutil.Big)(big.NewInt(0)),
+Value: (*hexutil.Big)(zero),
}
if s == ENSRegister {
estimate, err := service.ens.API().RegisterEstimate(context.Background(), network.ChainID, tx, EstimateUsername, EstimatePubKey)
@@ -78,8 +78,8 @@ func (s SendType) EstimateGas(service *Service, network *params.Network) uint64
}
if s == StickersBuy {
-packId := &bigint.BigInt{Int: big.NewInt(2)}
+packID := &bigint.BigInt{Int: big.NewInt(2)}
-estimate, err := service.stickers.API().BuyEstimate(context.Background(), network.ChainID, from, packId)
+estimate, err := service.stickers.API().BuyEstimate(context.Background(), network.ChainID, from, packID)
if err != nil {
return 400000
}
@@ -210,7 +210,7 @@ func newSuggestedRoutes(amountIn *big.Int, candidates []*Path) *SuggestedRoutes
rest := new(big.Int).Set(amountIn)
for _, path := range best {
diff := new(big.Int).Sub(rest, path.MaxAmountIn.ToInt())
-if diff.Cmp(big.NewInt(0)) >= 0 {
+if diff.Cmp(zero) >= 0 {
path.AmountIn = path.MaxAmountIn
} else {
path.AmountIn = (*hexutil.Big)(new(big.Int).Set(rest))
@@ -235,7 +235,7 @@ func NewRouter(s *Service) *Router {
return &Router{s, bridges}
}
-func containsNetworkChainId(network *params.Network, chainIDs []uint64) bool {
+func containsNetworkChainID(network *params.Network, chainIDs []uint64) bool {
for _, chainID := range chainIDs {
if chainID == network.ChainID {
return true
@@ -298,7 +298,7 @@ func (r *Router) suggestedRoutes(ctx context.Context, sendType SendType, account
continue
}
-if containsNetworkChainId(network, disabledFromChainIDs) {
+if containsNetworkChainID(network, disabledFromChainIDs) {
continue
}
@@ -345,11 +345,11 @@ func (r *Router) suggestedRoutes(ctx context.Context, sendType SendType, account
continue
}
-if len(preferedChainIDs) > 0 && !containsNetworkChainId(network, preferedChainIDs) {
+if len(preferedChainIDs) > 0 && !containsNetworkChainID(network, preferedChainIDs) {
continue
}
-if containsNetworkChainId(dest, disabledToChaindIDs) {
+if containsNetworkChainID(dest, disabledToChaindIDs) {
continue
}
@@ -378,7 +378,7 @@ func (r *Router) suggestedRoutes(ctx context.Context, sendType SendType, account
continue
}
-preferred := containsNetworkChainId(dest, preferedChainIDs)
+preferred := containsNetworkChainID(dest, preferedChainIDs)
gasCost := new(big.Float)
gasCost.Mul(
@@ -400,8 +400,8 @@ func (r *Router) suggestedRoutes(ctx context.Context, sendType SendType, account
From: network,
To: dest,
MaxAmountIn: (*hexutil.Big)(balance),
-AmountIn: (*hexutil.Big)(big.NewInt(0)),
+AmountIn: (*hexutil.Big)(zero),
-AmountOut: (*hexutil.Big)(big.NewInt(0)),
+AmountOut: (*hexutil.Big)(zero),
GasAmount: gasLimit,
GasFees: gasFees,
BonderFees: (*hexutil.Big)(bonderFees),
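
These router hunks rename containsNetworkChainId to containsNetworkChainID and replace repeated big.NewInt(0) allocations with a shared zero value. A self-contained sketch under those assumptions; Network stands in for params.Network with only the field used here, and the package-level zero mirrors what the diff references:

```go
// Self-contained sketch of the renamed helper and the shared zero value the
// router hunks reference; Network is a simplified stand-in for params.Network.
package main

import (
	"fmt"
	"math/big"
)

var zero = big.NewInt(0) // package-level zero assumed to exist in the real router package

type Network struct {
	ChainID uint64
}

// containsNetworkChainID reports whether the network's chain ID appears in chainIDs.
func containsNetworkChainID(network *Network, chainIDs []uint64) bool {
	for _, chainID := range chainIDs {
		if chainID == network.ChainID {
			return true
		}
	}
	return false
}

func main() {
	optimism := &Network{ChainID: 10}
	fmt.Println(containsNetworkChainID(optimism, []uint64{1, 10})) // true
	fmt.Println(containsNetworkChainID(optimism, nil))             // false

	// The router now compares against the shared zero instead of allocating a new big.Int.
	diff := new(big.Int).Sub(big.NewInt(5), big.NewInt(3))
	fmt.Println(diff.Cmp(zero) >= 0) // true
}
```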

View File

@@ -28,7 +28,7 @@ func TestCustoms(t *testing.T) {
manager, stop := setupTestTokenDB(t)
defer stop()
-rst, err := manager.getCustoms()
+rst, err := manager.GetCustoms()
require.NoError(t, err)
require.Nil(t, rst)
@@ -41,18 +41,18 @@ func TestCustoms(t *testing.T) {
ChainID: 777,
}
-err = manager.upsertCustom(token)
+err = manager.UpsertCustom(token)
require.NoError(t, err)
-rst, err = manager.getCustoms()
+rst, err = manager.GetCustoms()
require.NoError(t, err)
require.Equal(t, 1, len(rst))
require.Equal(t, token, *rst[0])
-err = manager.deleteCustom(777, token.Address)
+err = manager.DeleteCustom(777, token.Address)
require.NoError(t, err)
-rst, err = manager.getCustoms()
+rst, err = manager.GetCustoms()
require.NoError(t, err)
require.Equal(t, 0, len(rst))
}
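
The test now goes through exported methods: getCustoms, upsertCustom and deleteCustom become GetCustoms, UpsertCustom and DeleteCustom. A stand-in sketch of that exported surface; Token's fields and the in-memory map are illustrative placeholders for the database-backed manager the test sets up with setupTestTokenDB:

```go
// Stand-in sketch of the exported token-manager surface exercised above.
// Token and the in-memory storage are simplified placeholders.
package main

import "fmt"

type Token struct {
	Address string // the real manager uses a typed address value
	Name    string
	Symbol  string
	ChainID uint64
}

type Manager struct {
	customs map[string]*Token // keyed by chainID/address; placeholder for the wallet DB
}

func NewManager() *Manager {
	return &Manager{customs: map[string]*Token{}}
}

// GetCustoms lists all stored custom tokens (exported in this commit).
func (m *Manager) GetCustoms() ([]*Token, error) {
	var out []*Token
	for _, t := range m.customs {
		out = append(out, t)
	}
	return out, nil
}

// UpsertCustom inserts or updates a custom token.
func (m *Manager) UpsertCustom(t Token) error {
	m.customs[fmt.Sprintf("%d/%s", t.ChainID, t.Address)] = &t
	return nil
}

// DeleteCustom removes a custom token by chain ID and address.
func (m *Manager) DeleteCustom(chainID uint64, address string) error {
	delete(m.customs, fmt.Sprintf("%d/%s", chainID, address))
	return nil
}

func main() {
	m := NewManager()
	_ = m.UpsertCustom(Token{Address: "0xabc", Name: "Custom", Symbol: "CST", ChainID: 777})
	rst, _ := m.GetCustoms()
	fmt.Println(len(rst)) // 1, as in the test above
	_ = m.DeleteCustom(777, "0xabc")
	rst, _ = m.GetCustoms()
	fmt.Println(len(rst)) // 0
}
```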

View File

@@ -6,7 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
-signercore "github.com/ethereum/go-ethereum/signer/core"
+signercore "github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/status-im/status-go/account"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/services/typeddata"

View File

@@ -6,7 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
-signercore "github.com/ethereum/go-ethereum/signer/core"
+signercore "github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/services/typeddata"

vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md (new file, 21 lines; generated, vendored)
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Brian Goff
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,14 @@
package md2man
import (
"github.com/russross/blackfriday/v2"
)
// Render converts a markdown document into a roff formatted document.
func Render(doc []byte) []byte {
renderer := NewRoffRenderer()
return blackfriday.Run(doc,
[]blackfriday.Option{blackfriday.WithRenderer(renderer),
blackfriday.WithExtensions(renderer.GetExtensions())}...)
}

vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go (new file, 336 lines; generated, vendored)
View File

@@ -0,0 +1,336 @@
package md2man
import (
"fmt"
"io"
"os"
"strings"
"github.com/russross/blackfriday/v2"
)
// roffRenderer implements the blackfriday.Renderer interface for creating
// roff format (manpages) from markdown text
type roffRenderer struct {
extensions blackfriday.Extensions
listCounters []int
firstHeader bool
firstDD bool
listDepth int
}
const (
titleHeader = ".TH "
topLevelHeader = "\n\n.SH "
secondLevelHdr = "\n.SH "
otherHeader = "\n.SS "
crTag = "\n"
emphTag = "\\fI"
emphCloseTag = "\\fP"
strongTag = "\\fB"
strongCloseTag = "\\fP"
breakTag = "\n.br\n"
paraTag = "\n.PP\n"
hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
linkTag = "\n\\[la]"
linkCloseTag = "\\[ra]"
codespanTag = "\\fB\\fC"
codespanCloseTag = "\\fR"
codeTag = "\n.PP\n.RS\n\n.nf\n"
codeCloseTag = "\n.fi\n.RE\n"
quoteTag = "\n.PP\n.RS\n"
quoteCloseTag = "\n.RE\n"
listTag = "\n.RS\n"
listCloseTag = "\n.RE\n"
dtTag = "\n.TP\n"
dd2Tag = "\n"
tableStart = "\n.TS\nallbox;\n"
tableEnd = ".TE\n"
tableCellStart = "T{\n"
tableCellEnd = "\nT}\n"
)
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
// from markdown
func NewRoffRenderer() *roffRenderer { // nolint: golint
var extensions blackfriday.Extensions
extensions |= blackfriday.NoIntraEmphasis
extensions |= blackfriday.Tables
extensions |= blackfriday.FencedCode
extensions |= blackfriday.SpaceHeadings
extensions |= blackfriday.Footnotes
extensions |= blackfriday.Titleblock
extensions |= blackfriday.DefinitionLists
return &roffRenderer{
extensions: extensions,
}
}
// GetExtensions returns the list of extensions used by this renderer implementation
func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
return r.extensions
}
// RenderHeader handles outputting the header at document start
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
// disable hyphenation
out(w, ".nh\n")
}
// RenderFooter handles outputting the footer at the document end; the roff
// renderer has no footer information
func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
}
// RenderNode is called for each node in a markdown document; based on the node
// type the equivalent roff output is sent to the writer
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
var walkAction = blackfriday.GoToNext
switch node.Type {
case blackfriday.Text:
escapeSpecialChars(w, node.Literal)
case blackfriday.Softbreak:
out(w, crTag)
case blackfriday.Hardbreak:
out(w, breakTag)
case blackfriday.Emph:
if entering {
out(w, emphTag)
} else {
out(w, emphCloseTag)
}
case blackfriday.Strong:
if entering {
out(w, strongTag)
} else {
out(w, strongCloseTag)
}
case blackfriday.Link:
if !entering {
out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
}
case blackfriday.Image:
// ignore images
walkAction = blackfriday.SkipChildren
case blackfriday.Code:
out(w, codespanTag)
escapeSpecialChars(w, node.Literal)
out(w, codespanCloseTag)
case blackfriday.Document:
break
case blackfriday.Paragraph:
// roff .PP markers break lists
if r.listDepth > 0 {
return blackfriday.GoToNext
}
if entering {
out(w, paraTag)
} else {
out(w, crTag)
}
case blackfriday.BlockQuote:
if entering {
out(w, quoteTag)
} else {
out(w, quoteCloseTag)
}
case blackfriday.Heading:
r.handleHeading(w, node, entering)
case blackfriday.HorizontalRule:
out(w, hruleTag)
case blackfriday.List:
r.handleList(w, node, entering)
case blackfriday.Item:
r.handleItem(w, node, entering)
case blackfriday.CodeBlock:
out(w, codeTag)
escapeSpecialChars(w, node.Literal)
out(w, codeCloseTag)
case blackfriday.Table:
r.handleTable(w, node, entering)
case blackfriday.TableHead:
case blackfriday.TableBody:
case blackfriday.TableRow:
// no action as cell entries do all the nroff formatting
return blackfriday.GoToNext
case blackfriday.TableCell:
r.handleTableCell(w, node, entering)
case blackfriday.HTMLSpan:
// ignore other HTML tags
default:
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
}
return walkAction
}
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
switch node.Level {
case 1:
if !r.firstHeader {
out(w, titleHeader)
r.firstHeader = true
break
}
out(w, topLevelHeader)
case 2:
out(w, secondLevelHdr)
default:
out(w, otherHeader)
}
}
}
func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
openTag := listTag
closeTag := listCloseTag
if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
// tags for definition lists handled within Item node
openTag = ""
closeTag = ""
}
if entering {
r.listDepth++
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
r.listCounters = append(r.listCounters, 1)
}
out(w, openTag)
} else {
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
r.listCounters = r.listCounters[:len(r.listCounters)-1]
}
out(w, closeTag)
r.listDepth--
}
}
func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
r.listCounters[len(r.listCounters)-1]++
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
// DT (definition term): line just before DD (see below).
out(w, dtTag)
r.firstDD = true
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
// DD (definition description): line that starts with ": ".
//
// We have to distinguish between the first DD and the
// subsequent ones, as there should be no vertical
// whitespace between the DT and the first DD.
if r.firstDD {
r.firstDD = false
} else {
out(w, dd2Tag)
}
} else {
out(w, ".IP \\(bu 2\n")
}
} else {
out(w, "\n")
}
}
func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
out(w, tableStart)
// call walker to count cells (and rows?) so format section can be produced
columns := countColumns(node)
out(w, strings.Repeat("l ", columns)+"\n")
out(w, strings.Repeat("l ", columns)+".\n")
} else {
out(w, tableEnd)
}
}
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
var start string
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
start = "\t"
}
if node.IsHeader {
start += codespanTag
} else if nodeLiteralSize(node) > 30 {
start += tableCellStart
}
out(w, start)
} else {
var end string
if node.IsHeader {
end = codespanCloseTag
} else if nodeLiteralSize(node) > 30 {
end = tableCellEnd
}
if node.Next == nil && end != tableCellEnd {
// Last cell: need to carriage return if we are at the end of the
// header row and content isn't wrapped in a "tablecell"
end += crTag
}
out(w, end)
}
}
func nodeLiteralSize(node *blackfriday.Node) int {
total := 0
for n := node.FirstChild; n != nil; n = n.FirstChild {
total += len(n.Literal)
}
return total
}
// because roff format requires knowing the column count before outputting any table
// data we need to walk a table tree and count the columns
func countColumns(node *blackfriday.Node) int {
var columns int
node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
switch node.Type {
case blackfriday.TableRow:
if !entering {
return blackfriday.Terminate
}
case blackfriday.TableCell:
if entering {
columns++
}
default:
}
return blackfriday.GoToNext
})
return columns
}
func out(w io.Writer, output string) {
io.WriteString(w, output) // nolint: errcheck
}
func escapeSpecialChars(w io.Writer, text []byte) {
for i := 0; i < len(text); i++ {
// escape initial apostrophe or period
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
out(w, "\\&")
}
// directly copy normal characters
org := i
for i < len(text) && text[i] != '\\' {
i++
}
if i > org {
w.Write(text[org:i]) // nolint: errcheck
}
// escape a character
if i >= len(text) {
break
}
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
}
}

View File

@@ -1,3 +1,8 @@
[submodule "tests"]
path = tests/testdata
url = https://github.com/ethereum/tests
+shallow = true
+[submodule "evm-benchmarks"]
+path = tests/evm-benchmarks
+url = https://github.com/ipsilon/evm-benchmarks
+shallow = true

View File

@@ -1,7 +1,7 @@
# This file configures github.com/golangci/golangci-lint.
run:
-  timeout: 3m
+  timeout: 20m
  tests: true
  # default is true. Enables skipping of directories:
  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
@@ -12,17 +12,29 @@ run:
linters:
  disable-all: true
  enable:
-    - deadcode
    - goconst
    - goimports
    - gosimple
    - govet
    - ineffassign
    - misspell
-    # - staticcheck
    - unconvert
-    # - unused
+    - typecheck
-    - varcheck
+    - unused
+    - staticcheck
+    - bidichk
+    - durationcheck
+    - exportloopref
+    - whitespace
+    # - structcheck # lots of false positives
+    # - errcheck #lot of false positives
+    # - contextcheck
+    # - errchkjson # lots of false positives
+    # - errorlint # this check crashes
+    # - exhaustive # silly check
+    # - makezero # false positives
+    # - nilerr # several intentional
linters-settings:
  gofmt:
@@ -33,18 +45,20 @@ linters-settings:
issues:
  exclude-rules:
-    - path: crypto/blake2b/
-      linters:
-        - deadcode
-    - path: crypto/bn256/cloudflare
-      linters:
-        - deadcode
-    - path: p2p/discv5/
-      linters:
-        - deadcode
-    - path: core/vm/instructions_test.go
-      linters:
-        - goconst
-    - path: cmd/faucet/
+    - path: crypto/bn256/cloudflare/optate.go
      linters:
        - deadcode
+        - staticcheck
+    - path: internal/build/pgp.go
+      text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
+    - path: core/vm/contracts.go
+      text: 'SA1019: "golang.org/x/crypto/ripemd160" is deprecated: RIPEMD-160 is a legacy hash and should not be used for new applications.'
+    - path: accounts/usbwallet/trezor.go
+      text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
+    - path: accounts/usbwallet/trezor/
+      text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
+  exclude:
+    - 'SA1019: event.TypeMux is deprecated: use Feed'
+    - 'SA1019: strings.Title is deprecated'
+    - 'SA1019: strings.Title has been deprecated since Go 1.18 and an alternative has been available since Go 1.0: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.'
+    - 'SA1029: should not use built-in type string as key for value'

View File

@@ -1,123 +1,237 @@
[go-ethereum contributor .mailmap rewritten: the alias list grows from 123 to 237 lines and is now sorted alphabetically by name (Aaron Buchwald, Aaron Kumavis, Abel Nieto, Afri Schoedon, Alec Perseghin, ... Hwanjo Heo, ...), replacing the previous unsorted list (Jeffrey Wilcke, Viktor Trón, Joseph Goulden, Nick Savers, Maran Hidskes, Taylor Gerring, ...).]
Iskander (Alex) Sharipov <quasilyte@gmail.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> <i.sharipov@corp.vk.com>
Jae Kwon <jkwon.work@gmail.com>
Janoš Guljaš <janos@resenje.org> <janos@users.noreply.github.com>
Janoš Guljaš <janos@resenje.org> Janos Guljas <janos@resenje.org>
Jared Wasinger <j-wasinger@hotmail.com>
Jason Carver <jacarver@linkedin.com>
Jason Carver <jacarver@linkedin.com> <ut96caarrs@snkmail.com>
Javier Peletier <jm@epiclabs.io>
Javier Peletier <jm@epiclabs.io> <jpeletier@users.noreply.github.com>
Jeffrey Wilcke <jeffrey@ethereum.org>
Jeffrey Wilcke <jeffrey@ethereum.org> <geffobscura@gmail.com>
Jeffrey Wilcke <jeffrey@ethereum.org> <obscuren@obscura.com>
Jeffrey Wilcke <jeffrey@ethereum.org> <obscuren@users.noreply.github.com>
Jens Agerberg <github@agerberg.me>
Joseph Chow <ethereum@outlook.com>
Joseph Chow <ethereum@outlook.com> ethers <TODO>
Joseph Goulden <joegoulden@gmail.com>
Justin Drake <drakefjustin@gmail.com>
Kenso Trabing <ktrabing@acm.org>
Kenso Trabing <ktrabing@acm.org> <kenso.trabing@bloomwebsite.com>
Liang Ma <liangma@liangbit.com>
Liang Ma <liangma@liangbit.com> <liangma.ul@gmail.com>
Louis Holbrook <dev@holbrook.no>
Louis Holbrook <dev@holbrook.no> <nolash@users.noreply.github.com>
Maran Hidskes <maran.hidskes@gmail.com>
Marian Oancea <contact@siteshop.ro>
Martin Becze <mjbecze@gmail.com>
Martin Becze <mjbecze@gmail.com> <wanderer@users.noreply.github.com>
Martin Lundfall <martin.lundfall@protonmail.com>
Matt Garnett <14004106+lightclient@users.noreply.github.com>
Matthew Halpern <matthalp@gmail.com>
Matthew Halpern <matthalp@gmail.com> <matthalp@google.com>
Michael Riabzev <michael@starkware.co>
Nchinda Nchinda <nchinda2@gmail.com>
Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net>
Nick Savers <nicksavers@gmail.com>
Nishant Das <nishdas93@gmail.com>
Nishant Das <nishdas93@gmail.com> <nish1993@hotmail.com>
Olivier Hervieu <olivier.hervieu@gmail.com>
Pascal Dierich <pascal@merkleplant.xyz>
Pascal Dierich <pascal@merkleplant.xyz> <pascal@pascaldierich.com>
RJ Catalano <catalanor0220@gmail.com>
RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com>
Ralph Caraveo <deckarep@gmail.com>
Rene Lubov <41963722+renaynay@users.noreply.github.com>
Robert Zaremba <robert@zaremba.ch>
Robert Zaremba <robert@zaremba.ch> <robert.zaremba@scale-it.pl>
Roman Mandeleil <roman.mandeleil@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com> Sorin Neacsu <sorin.neacsu@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com> Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com>
Sven Ehlert <sven@ethdev.com>
Taylor Gerring <taylor.gerring@gmail.com>
Taylor Gerring <taylor.gerring@gmail.com> <taylor.gerring@ethereum.org>
Thomas Bocek <tom@tomp2p.net>
Tim Cooijmans <timcooijmans@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com> Valentin Wüstholz <wuestholz@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com> Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com>
Armin Braun <me@obrown.io> Victor Tran <vu.tran54@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com> Viktor Trón <viktor.tron@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com> <ernestodeltoro@users.noreply.github.com>
Ville Sundell <github@solarius.fi>
Vincent G <caktux@gmail.com>
Vitalik Buterin <v@buterin.com>
Vlad Gluhovsky <gluk256@gmail.com>
Vlad Gluhovsky <gluk256@gmail.com> <gluk256@users.noreply.github.com>
Wenshao Zhong <wzhong20@uic.edu>
Wenshao Zhong <wzhong20@uic.edu> <11510383@mail.sustc.edu.cn>
Wenshao Zhong <wzhong20@uic.edu> <374662347@qq.com>
Will Villanueva <hello@willvillanueva.com>
Xiaobing Jiang <s7v7nislands@gmail.com>
Xudong Liu <33193253+r1cs@users.noreply.github.com>
Yohann Léon <sybiload@gmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com> <zachinquarantine@yahoo.com>
Ziyuan Zhong <zzy.albert@163.com>
Zsolt Felföldi <zsfelfoldi@gmail.com>
meowsbits <b5c6@protonmail.com>
meowsbits <b5c6@protonmail.com> <45600330+meowsbits@users.noreply.github.com>
nedifi <103940716+nedifi@users.noreply.github.com>
Максим Чусовлянов <mchusovlianov@gmail.com>

View File

@ -5,7 +5,7 @@ jobs:
allow_failures: allow_failures:
- stage: build - stage: build
os: osx os: osx
go: 1.15.x go: 1.17.x
env: env:
- azure-osx - azure-osx
- azure-ios - azure-ios
@ -16,7 +16,7 @@ jobs:
- stage: lint - stage: lint
os: linux os: linux
dist: bionic dist: bionic
go: 1.16.x go: 1.19.x
env: env:
- lint - lint
git: git:
@ -31,7 +31,7 @@ jobs:
os: linux os: linux
arch: amd64 arch: amd64
dist: bionic dist: bionic
go: 1.16.x go: 1.19.x
env: env:
- docker - docker
services: services:
@ -41,14 +41,14 @@ jobs:
before_install: before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled - export DOCKER_CLI_EXPERIMENTAL=enabled
script: script:
- go run build/ci.go docker -image -manifest amd64,arm64 -upload karalabe/geth-docker-test - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go
- stage: build - stage: build
if: type = push if: type = push
os: linux os: linux
arch: arm64 arch: arm64
dist: bionic dist: bionic
go: 1.16.x go: 1.19.x
env: env:
- docker - docker
services: services:
@ -58,14 +58,14 @@ jobs:
before_install: before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled - export DOCKER_CLI_EXPERIMENTAL=enabled
script: script:
- go run build/ci.go docker -image -manifest amd64,arm64 -upload karalabe/geth-docker-test - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go
# This builder does the Ubuntu PPA upload # This builder does the Ubuntu PPA upload
- stage: build - stage: build
if: type = push if: type = push
os: linux os: linux
dist: bionic dist: bionic
go: 1.16.x go: 1.19.x
env: env:
- ubuntu-ppa - ubuntu-ppa
- GO111MODULE=on - GO111MODULE=on
@ -90,7 +90,7 @@ jobs:
os: linux os: linux
dist: bionic dist: bionic
sudo: required sudo: required
go: 1.16.x go: 1.19.x
env: env:
- azure-linux - azure-linux
- GO111MODULE=on - GO111MODULE=on
@ -120,36 +120,6 @@ jobs:
- go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
# This builder does the Linux Azure MIPS xgo uploads
- stage: build
if: type = push
os: linux
dist: bionic
services:
- docker
go: 1.16.x
env:
- azure-linux-mips
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
script:
- go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
- go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done
- go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done
- go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
# This builder does the Android Maven and Azure uploads # This builder does the Android Maven and Azure uploads
- stage: build - stage: build
if: type = push if: type = push
@ -178,7 +148,7 @@ jobs:
- sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle" - sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle"
# Install Go to allow building with # Install Go to allow building with
- curl https://dl.google.com/go/go1.16.linux-amd64.tar.gz | tar -xz - curl https://dl.google.com/go/go1.18.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH - export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go - export GOROOT=`pwd`/go
- export GOPATH=$HOME/go - export GOPATH=$HOME/go
@ -192,7 +162,7 @@ jobs:
- stage: build - stage: build
if: type = push if: type = push
os: osx os: osx
go: 1.16.x go: 1.19.x
env: env:
- azure-osx - azure-osx
- azure-ios - azure-ios
@ -224,7 +194,7 @@ jobs:
os: linux os: linux
arch: amd64 arch: amd64
dist: bionic dist: bionic
go: 1.16.x go: 1.19.x
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
@ -235,7 +205,7 @@ jobs:
os: linux os: linux
arch: arm64 arch: arm64
dist: bionic dist: bionic
go: 1.16.x go: 1.18.x
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
@ -244,7 +214,7 @@ jobs:
- stage: build - stage: build
os: linux os: linux
dist: bionic dist: bionic
go: 1.15.x go: 1.18.x
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
@ -255,7 +225,7 @@ jobs:
if: type = cron if: type = cron
os: linux os: linux
dist: bionic dist: bionic
go: 1.16.x go: 1.19.x
env: env:
- azure-purge - azure-purge
- GO111MODULE=on - GO111MODULE=on
@ -263,3 +233,15 @@ jobs:
submodules: false # avoid cloning ethereum/tests submodules: false # avoid cloning ethereum/tests
script: script:
- go run build/ci.go purge -store gethstore/builds -days 14 - go run build/ci.go purge -store gethstore/builds -days 14
# This builder executes race tests
- stage: build
if: type = cron
os: linux
dist: bionic
go: 1.19.x
env:
- GO111MODULE=on
script:
- go run build/ci.go test -race -coverage $TEST_PACKAGES
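The cron builder added above runs the suite under the Go race detector (`go run build/ci.go test -race`). Purely as an illustration, and not code from this commit, here is a minimal test exhibiting the kind of data race a `-race` build reports (package and test names are invented):

```go
package example

import (
	"sync"
	"testing"
)

// TestCounterRace updates a shared counter from two goroutines without any
// synchronisation. A plain `go test` usually passes; `go test -race` flags
// the conflicting read-modify-write accesses on counter.
func TestCounterRace(t *testing.T) {
	var (
		counter int
		wg      sync.WaitGroup
	)
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter++ // unsynchronised access: data race
		}()
	}
	wg.Wait()
	t.Logf("counter = %d", counter)
}
```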

View File

@ -1,27 +1,46 @@
# This is the official list of go-ethereum authors for copyright purposes. # This is the official list of go-ethereum authors for copyright purposes.
6543 <6543@obermui.de>
a e r t h <aerth@users.noreply.github.com> a e r t h <aerth@users.noreply.github.com>
Aaron Buchwald <aaron.buchwald56@gmail.com>
Abel Nieto <abel.nieto90@gmail.com> Abel Nieto <abel.nieto90@gmail.com>
Abel Nieto <anietoro@uwaterloo.ca>
Adam Babik <a.babik@designfortress.com> Adam Babik <a.babik@designfortress.com>
Adam Schmideg <adamschmideg@users.noreply.github.com>
Aditya <adityasripal@gmail.com> Aditya <adityasripal@gmail.com>
Aditya Arora <arora.aditya520@gmail.com>
Adrià Cidre <adria.cidre@gmail.com> Adrià Cidre <adria.cidre@gmail.com>
Afanasii Kurakin <afanasy@users.noreply.github.com>
Afri Schoedon <5chdn@users.noreply.github.com> Afri Schoedon <5chdn@users.noreply.github.com>
Agustin Armellini Fischer <armellini13@gmail.com> Agustin Armellini Fischer <armellini13@gmail.com>
Ahyun <urbanart2251@gmail.com>
Airead <fgh1987168@gmail.com> Airead <fgh1987168@gmail.com>
Alan Chen <alanchchen@users.noreply.github.com> Alan Chen <alanchchen@users.noreply.github.com>
Alejandro Isaza <alejandro.isaza@gmail.com> Alejandro Isaza <alejandro.isaza@gmail.com>
Aleksey Smyrnov <i@soar.name>
Ales Katona <ales@coinbase.com> Ales Katona <ales@coinbase.com>
Alex Beregszaszi <alex@rtfs.hu>
Alex Leverington <alex@ethdev.com> Alex Leverington <alex@ethdev.com>
Alex Mazalov <mazalov@gmail.com>
Alex Pozhilenkov <alex_pozhilenkov@adoriasoft.com>
Alex Prut <1648497+alexprut@users.noreply.github.com>
Alex Wu <wuyiding@gmail.com> Alex Wu <wuyiding@gmail.com>
Alexander van der Meij <alexandervdm@users.noreply.github.com>
Alexander Yastrebov <yastrebov.alex@gmail.com>
Alexandre Van de Sande <alex.vandesande@ethdev.com> Alexandre Van de Sande <alex.vandesande@ethdev.com>
Alexey Akhunov <akhounov@gmail.com>
Alexey Shekhirin <a.shekhirin@gmail.com>
alexwang <39109351+dipingxian2@users.noreply.github.com>
Ali Atiia <42751398+aliatiia@users.noreply.github.com>
Ali Hajimirza <Ali92hm@users.noreply.github.com> Ali Hajimirza <Ali92hm@users.noreply.github.com>
am2rican5 <am2rican5@gmail.com> am2rican5 <am2rican5@gmail.com>
AmitBRD <60668103+AmitBRD@users.noreply.github.com>
Anatole <62328077+a2br@users.noreply.github.com>
Andrea Franz <andrea@gravityblast.com> Andrea Franz <andrea@gravityblast.com>
Andrey Petrov <andrey.petrov@shazow.net> Andrei Maiboroda <andrei@ethereum.org>
Andrey Petrov <shazow@gmail.com> Andrey Petrov <shazow@gmail.com>
ANOTHEL <anothel1@naver.com> ANOTHEL <anothel1@naver.com>
Antoine Rondelet <rondelet.antoine@gmail.com> Antoine Rondelet <rondelet.antoine@gmail.com>
Antoine Toulme <atoulme@users.noreply.github.com>
Anton Evangelatov <anton.evangelatov@gmail.com> Anton Evangelatov <anton.evangelatov@gmail.com>
Antonio Salazar Cardozo <savedfastcool@gmail.com> Antonio Salazar Cardozo <savedfastcool@gmail.com>
Arba Sasmoyo <arba.sasmoyo@gmail.com> Arba Sasmoyo <arba.sasmoyo@gmail.com>
@ -29,19 +48,26 @@ Armani Ferrante <armaniferrante@berkeley.edu>
Armin Braun <me@obrown.io> Armin Braun <me@obrown.io>
Aron Fischer <github@aron.guru> Aron Fischer <github@aron.guru>
atsushi-ishibashi <atsushi.ishibashi@finatext.com> atsushi-ishibashi <atsushi.ishibashi@finatext.com>
Austin Roberts <code@ausiv.com>
ayeowch <ayeowch@gmail.com> ayeowch <ayeowch@gmail.com>
b00ris <b00ris@mail.ru> b00ris <b00ris@mail.ru>
b1ackd0t <blackd0t@protonmail.com>
bailantaotao <Edwin@maicoin.com> bailantaotao <Edwin@maicoin.com>
baizhenxuan <nkbai@163.com> baizhenxuan <nkbai@163.com>
Balaji Shetty Pachai <32358081+balajipachai@users.noreply.github.com>
Balint Gabor <balint.g@gmail.com> Balint Gabor <balint.g@gmail.com>
baptiste-b-pegasys <85155432+baptiste-b-pegasys@users.noreply.github.com>
Bas van Kervel <bas@ethdev.com> Bas van Kervel <bas@ethdev.com>
Benjamin Brent <benjamin@benjaminbrent.com> Benjamin Brent <benjamin@benjaminbrent.com>
benma <mbencun@gmail.com> benma <mbencun@gmail.com>
Benoit Verkindt <benoit.verkindt@gmail.com> Benoit Verkindt <benoit.verkindt@gmail.com>
Binacs <bin646891055@gmail.com>
bloonfield <bloonfield@163.com> bloonfield <bloonfield@163.com>
Bo <bohende@gmail.com> Bo <bohende@gmail.com>
Bo Ye <boy.e.computer.1982@outlook.com> Bo Ye <boy.e.computer.1982@outlook.com>
Bob Glickstein <bobg@users.noreply.github.com> Bob Glickstein <bobg@users.noreply.github.com>
Boqin Qin <bobbqqin@bupt.edu.cn>
Brandon Harden <b.harden92@gmail.com>
Brent <bmperrea@gmail.com> Brent <bmperrea@gmail.com>
Brian Schroeder <bts@gmail.com> Brian Schroeder <bts@gmail.com>
Bruno Škvorc <bruno@skvorc.me> Bruno Škvorc <bruno@skvorc.me>
@ -49,36 +75,58 @@ C. Brown <hackdom@majoolr.io>
Caesar Chad <BLUE.WEB.GEEK@gmail.com> Caesar Chad <BLUE.WEB.GEEK@gmail.com>
Casey Detrio <cdetrio@gmail.com> Casey Detrio <cdetrio@gmail.com>
CDsigma <cdsigma271@gmail.com> CDsigma <cdsigma271@gmail.com>
Ceelog <chenwei@ceelog.org>
Ceyhun Onur <ceyhun.onur@avalabs.org>
chabashilah <doumodoumo@gmail.com>
changhong <changhong.yu@shanbay.com> changhong <changhong.yu@shanbay.com>
Chase Wright <mysticryuujin@gmail.com> Chase Wright <mysticryuujin@gmail.com>
Chen Quan <terasum@163.com> Chen Quan <terasum@163.com>
Cheng Li <lob4tt@gmail.com>
chenglin <910372762@qq.com>
chenyufeng <yufengcode@gmail.com> chenyufeng <yufengcode@gmail.com>
Chris Pacia <ctpacia@gmail.com>
Chris Ziogas <ziogaschr@gmail.com>
Christian Muehlhaeuser <muesli@gmail.com> Christian Muehlhaeuser <muesli@gmail.com>
Christoph Jentzsch <jentzsch.software@gmail.com> Christoph Jentzsch <jentzsch.software@gmail.com>
chuwt <weitaochu@gmail.com>
cong <ackratos@users.noreply.github.com> cong <ackratos@users.noreply.github.com>
Connor Stein <connor.stein@mail.mcgill.ca>
Corey Lin <514971757@qq.com> Corey Lin <514971757@qq.com>
courtier <derinilter@gmail.com>
cpusoft <cpusoft@live.com> cpusoft <cpusoft@live.com>
Crispin Flowerday <crispin@bitso.com> Crispin Flowerday <crispin@bitso.com>
croath <croathliu@gmail.com> croath <croathliu@gmail.com>
cui <523516579@qq.com> cui <523516579@qq.com>
Dan DeGreef <dan.degreef@gmail.com>
Dan Kinsley <dan@joincivil.com> Dan Kinsley <dan@joincivil.com>
Dan Sosedoff <dan.sosedoff@gmail.com>
Daniel A. Nagy <nagy.da@gmail.com> Daniel A. Nagy <nagy.da@gmail.com>
Daniel Perez <daniel@perez.sh>
Daniel Sloof <goapsychadelic@gmail.com> Daniel Sloof <goapsychadelic@gmail.com>
Darioush Jalali <darioush.jalali@avalabs.org>
Darrel Herbst <dherbst@gmail.com> Darrel Herbst <dherbst@gmail.com>
Dave Appleton <calistralabs@gmail.com> Dave Appleton <calistralabs@gmail.com>
Dave McGregor <dave.s.mcgregor@gmail.com> Dave McGregor <dave.s.mcgregor@gmail.com>
David Cai <davidcai1993@yahoo.com>
David Huie <dahuie@gmail.com> David Huie <dahuie@gmail.com>
Denver <aeharvlee@gmail.com>
Derek Chiang <me@derekchiang.com>
Derek Gottfrid <derek@codecubed.com> Derek Gottfrid <derek@codecubed.com>
Di Peng <pendyaaa@gmail.com>
Diederik Loerakker <proto@protolambda.com>
Diego Siqueira <DiSiqueira@users.noreply.github.com> Diego Siqueira <DiSiqueira@users.noreply.github.com>
Diep Pham <mrfavadi@gmail.com> Diep Pham <mrfavadi@gmail.com>
dipingxian2 <39109351+dipingxian2@users.noreply.github.com> dipingxian2 <39109351+dipingxian2@users.noreply.github.com>
divergencetech <94644849+divergencetech@users.noreply.github.com>
dm4 <sunrisedm4@gmail.com> dm4 <sunrisedm4@gmail.com>
Dmitrij Koniajev <dimchansky@gmail.com> Dmitrij Koniajev <dimchansky@gmail.com>
Dmitry Shulyak <yashulyak@gmail.com> Dmitry Shulyak <yashulyak@gmail.com>
Dmitry Zenovich <dzenovich@gmail.com>
Domino Valdano <dominoplural@gmail.com> Domino Valdano <dominoplural@gmail.com>
Domino Valdano <jeff@okcupid.com>
Dragan Milic <dragan@netice9.com> Dragan Milic <dragan@netice9.com>
dragonvslinux <35779158+dragononcrypto@users.noreply.github.com> dragonvslinux <35779158+dragononcrypto@users.noreply.github.com>
Edgar Aroutiounian <edgar.factorial@gmail.com>
Eduard S <eduardsanou@posteo.net>
Egon Elbre <egonelbre@gmail.com> Egon Elbre <egonelbre@gmail.com>
Elad <theman@elad.im> Elad <theman@elad.im>
Eli <elihanover@yahoo.com> Eli <elihanover@yahoo.com>
@ -86,131 +134,189 @@ Elias Naur <elias.naur@gmail.com>
Elliot Shepherd <elliot@identitii.com> Elliot Shepherd <elliot@identitii.com>
Emil <mursalimovemeel@gmail.com> Emil <mursalimovemeel@gmail.com>
emile <emile@users.noreply.github.com> emile <emile@users.noreply.github.com>
Enrique Fynn <enriquefynn@gmail.com> Emmanuel T Odeke <odeke@ualberta.ca>
Eng Zer Jun <engzerjun@gmail.com>
Enrique Fynn <me@enriquefynn.com> Enrique Fynn <me@enriquefynn.com>
Enrique Ortiz <hi@enriqueortiz.dev>
EOS Classic <info@eos-classic.io> EOS Classic <info@eos-classic.io>
Erichin <erichinbato@gmail.com> Erichin <erichinbato@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com> Ernesto del Toro <ernesto.deltoro@gmail.com>
Ethan Buchman <ethan@coinculture.info> Ethan Buchman <ethan@coinculture.info>
ethersphere <thesw@rm.eth> ethersphere <thesw@rm.eth>
Eugene Lepeico <eugenelepeico@gmail.com>
Eugene Valeyev <evgen.povt@gmail.com> Eugene Valeyev <evgen.povt@gmail.com>
Evangelos Pappas <epappas@evalonlabs.com> Evangelos Pappas <epappas@evalonlabs.com>
Everton Fraga <ev@ethereum.org>
Evgeny <awesome.observer@yandex.com> Evgeny <awesome.observer@yandex.com>
Evgeny Danilenko <6655321@bk.ru> Evgeny Danilenko <6655321@bk.ru>
evgk <evgeniy.kamyshev@gmail.com> evgk <evgeniy.kamyshev@gmail.com>
Evolution404 <35091674+Evolution404@users.noreply.github.com>
EXEC <execvy@gmail.com>
Fabian Vogelsteller <fabian@frozeman.de> Fabian Vogelsteller <fabian@frozeman.de>
Fabio Barone <fabio.barone.co@gmail.com> Fabio Barone <fabio.barone.co@gmail.com>
Fabio Berger <fabioberger1991@gmail.com> Fabio Berger <fabioberger1991@gmail.com>
FaceHo <facehoshi@gmail.com> FaceHo <facehoshi@gmail.com>
Felipe Strozberg <48066928+FelStroz@users.noreply.github.com>
Felix Lange <fjl@twurst.com> Felix Lange <fjl@twurst.com>
Ferenc Szabo <frncmx@gmail.com> Ferenc Szabo <frncmx@gmail.com>
ferhat elmas <elmas.ferhat@gmail.com> ferhat elmas <elmas.ferhat@gmail.com>
Ferran Borreguero <ferranbt@protonmail.com>
Fiisio <liangcszzu@163.com> Fiisio <liangcszzu@163.com>
Fire Man <55934298+basdevelop@users.noreply.github.com>
flowerofdream <775654398@qq.com>
fomotrader <82184770+fomotrader@users.noreply.github.com>
ForLina <471133417@qq.com>
Frank Szendzielarz <33515470+FrankSzendzielarz@users.noreply.github.com> Frank Szendzielarz <33515470+FrankSzendzielarz@users.noreply.github.com>
Frank Wang <eternnoir@gmail.com> Frank Wang <eternnoir@gmail.com>
Franklin <mr_franklin@126.com> Franklin <mr_franklin@126.com>
Furkan KAMACI <furkankamaci@gmail.com> Furkan KAMACI <furkankamaci@gmail.com>
Fuyang Deng <dengfuyang@outlook.com>
GagziW <leon.stanko@rwth-aachen.de> GagziW <leon.stanko@rwth-aachen.de>
Gary Rong <garyrong0905@gmail.com> Gary Rong <garyrong0905@gmail.com>
Gautam Botrel <gautam.botrel@gmail.com>
George Ornbo <george@shapeshed.com> George Ornbo <george@shapeshed.com>
Giuseppe Bertone <bertone.giuseppe@gmail.com>
Greg Colvin <greg@colvin.org>
Gregg Dourgarian <greggd@tempworks.com> Gregg Dourgarian <greggd@tempworks.com>
Gregory Markou <16929357+GregTheGreek@users.noreply.github.com>
Guifel <toowik@gmail.com>
Guilherme Salgado <gsalgado@gmail.com> Guilherme Salgado <gsalgado@gmail.com>
Guillaume Ballet <gballet@gmail.com> Guillaume Ballet <gballet@gmail.com>
Guillaume Nicolas <guin56@gmail.com> Guillaume Nicolas <guin56@gmail.com>
GuiltyMorishita <morilliantblue@gmail.com> GuiltyMorishita <morilliantblue@gmail.com>
Guruprasad Kamath <48196632+gurukamath@users.noreply.github.com>
Gus <yo@soygus.com> Gus <yo@soygus.com>
Gustav Simonsson <gustav.simonsson@gmail.com> Gustav Simonsson <gustav.simonsson@gmail.com>
Gísli Kristjánsson <gislik@hamstur.is> Gísli Kristjánsson <gislik@hamstur.is>
Ha ĐANG <dvietha@gmail.com> Ha ĐANG <dvietha@gmail.com>
HackyMiner <hackyminer@gmail.com> HackyMiner <hackyminer@gmail.com>
hadv <dvietha@gmail.com> hadv <dvietha@gmail.com>
Hanjiang Yu <delacroix.yu@gmail.com>
Hao Bryan Cheng <haobcheng@gmail.com> Hao Bryan Cheng <haobcheng@gmail.com>
Hao Duan <duanhao0814@gmail.com>
HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com> HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Harry Dutton <me@bytejedi.com>
haryu703 <34744512+haryu703@users.noreply.github.com>
Hendrik Hofstadt <hendrik@nexantic.com>
Henning Diedrich <hd@eonblast.com> Henning Diedrich <hd@eonblast.com>
henopied <13500516+henopied@users.noreply.github.com>
hero5512 <lvshuaino@gmail.com>
holisticode <holistic.computing@gmail.com> holisticode <holistic.computing@gmail.com>
Hongbin Mao <hello2mao@gmail.com> Hongbin Mao <hello2mao@gmail.com>
Hsien-Tang Kao <htkao@pm.me> Hsien-Tang Kao <htkao@pm.me>
hsyodyssey <47173566+hsyodyssey@users.noreply.github.com>
Husam Ibrahim <39692071+HusamIbrahim@users.noreply.github.com> Husam Ibrahim <39692071+HusamIbrahim@users.noreply.github.com>
Hwanjo Heo <34005989+hwanjo@users.noreply.github.com>
hydai <z54981220@gmail.com> hydai <z54981220@gmail.com>
Hyung-Kyu Hqueue Choi <hyungkyu.choi@gmail.com> Hyung-Kyu Hqueue Choi <hyungkyu.choi@gmail.com>
Håvard Anda Estensen <haavard.ae@gmail.com>
Ian Macalinao <me@ian.pw> Ian Macalinao <me@ian.pw>
Ian Norden <iannordenn@gmail.com> Ian Norden <iannordenn@gmail.com>
icodezjb <icodezjb@163.com>
Ikko Ashimine <eltociear@gmail.com>
Ilan Gitter <8359193+gitteri@users.noreply.github.com>
ImanSharaf <78227895+ImanSharaf@users.noreply.github.com>
Isidoro Ghezzi <isidoro.ghezzi@icloud.com> Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> Iskander (Alex) Sharipov <quasilyte@gmail.com>
Ivan Bogatyy <bogatyi@gmail.com>
Ivan Daniluk <ivan.daniluk@gmail.com> Ivan Daniluk <ivan.daniluk@gmail.com>
Ivo Georgiev <ivo@strem.io> Ivo Georgiev <ivo@strem.io>
jacksoom <lifengliu1994@gmail.com>
Jae Kwon <jkwon.work@gmail.com> Jae Kwon <jkwon.work@gmail.com>
James Prestwich <10149425+prestwich@users.noreply.github.com>
Jamie Pitts <james.pitts@gmail.com> Jamie Pitts <james.pitts@gmail.com>
Janos Guljas <janos@resenje.org> Janoš Guljaš <janos@resenje.org>
Janoš Guljaš <janos@users.noreply.github.com> Jared Wasinger <j-wasinger@hotmail.com>
Jason Carver <jacarver@linkedin.com> Jason Carver <jacarver@linkedin.com>
Javier Peletier <jm@epiclabs.io> Javier Peletier <jm@epiclabs.io>
Javier Peletier <jpeletier@users.noreply.github.com>
Javier Sagredo <jasataco@gmail.com> Javier Sagredo <jasataco@gmail.com>
Jay <codeholic.arena@gmail.com> Jay <codeholic.arena@gmail.com>
Jay Guo <guojiannan1101@gmail.com> Jay Guo <guojiannan1101@gmail.com>
Jaynti Kanani <jdkanani@gmail.com> Jaynti Kanani <jdkanani@gmail.com>
Jeff Prestes <jeffprestes@gmail.com> Jeff Prestes <jeffprestes@gmail.com>
Jeff R. Allen <jra@nella.org> Jeff R. Allen <jra@nella.org>
Jeff Wentworth <jeff@curvegrid.com>
Jeffery Robert Walsh <rlxrlps@gmail.com> Jeffery Robert Walsh <rlxrlps@gmail.com>
Jeffrey Wilcke <jeffrey@ethereum.org> Jeffrey Wilcke <jeffrey@ethereum.org>
Jens Agerberg <github@agerberg.me> Jens Agerberg <github@agerberg.me>
Jeremy McNevin <jeremy.mcnevin@optum.com> Jeremy McNevin <jeremy.mcnevin@optum.com>
Jeremy Schlatter <jeremy.schlatter@gmail.com> Jeremy Schlatter <jeremy.schlatter@gmail.com>
Jerzy Lasyk <jerzylasyk@gmail.com> Jerzy Lasyk <jerzylasyk@gmail.com>
Jesse Tane <jesse.tane@gmail.com>
Jia Chenhui <jiachenhui1989@gmail.com> Jia Chenhui <jiachenhui1989@gmail.com>
Jim McDonald <Jim@mcdee.net> Jim McDonald <Jim@mcdee.net>
jk-jeongkyun <45347815+jeongkyun-oh@users.noreply.github.com>
jkcomment <jkcomment@gmail.com> jkcomment <jkcomment@gmail.com>
JoeGruffins <34998433+JoeGruffins@users.noreply.github.com>
Joel Burget <joelburget@gmail.com> Joel Burget <joelburget@gmail.com>
John C. Vernaleo <john@netpurgatory.com> John C. Vernaleo <john@netpurgatory.com>
John Difool <johndifoolpi@gmail.com>
Johns Beharry <johns@peakshift.com> Johns Beharry <johns@peakshift.com>
Jonas <felberj@users.noreply.github.com> Jonas <felberj@users.noreply.github.com>
Jonathan Brown <jbrown@bluedroplet.com> Jonathan Brown <jbrown@bluedroplet.com>
Jonathan Chappelow <chappjc@users.noreply.github.com>
Jonathan Gimeno <jgimeno@gmail.com>
JoranHonig <JoranHonig@users.noreply.github.com> JoranHonig <JoranHonig@users.noreply.github.com>
Jordan Krage <jmank88@gmail.com> Jordan Krage <jmank88@gmail.com>
Jorropo <jorropo.pgm@gmail.com>
Joseph Chow <ethereum@outlook.com> Joseph Chow <ethereum@outlook.com>
Joshua Colvin <jcolvin@offchainlabs.com>
Joshua Gutow <jbgutow@gmail.com>
jovijovi <mageyul@hotmail.com>
jtakalai <juuso.takalainen@streamr.com> jtakalai <juuso.takalainen@streamr.com>
JU HYEONG PARK <dkdkajej@gmail.com> JU HYEONG PARK <dkdkajej@gmail.com>
Julian Y <jyap808@users.noreply.github.com>
Justin Clark-Casey <justincc@justincc.org> Justin Clark-Casey <justincc@justincc.org>
Justin Drake <drakefjustin@gmail.com> Justin Drake <drakefjustin@gmail.com>
jwasinger <j-wasinger@hotmail.com> Justus <jus@gtsbr.org>
Kawashima <91420903+sscodereth@users.noreply.github.com>
ken10100147 <sunhongping@kanjian.com> ken10100147 <sunhongping@kanjian.com>
Kenji Siu <kenji@isuntv.com> Kenji Siu <kenji@isuntv.com>
Kenso Trabing <kenso.trabing@bloomwebsite.com>
Kenso Trabing <ktrabing@acm.org> Kenso Trabing <ktrabing@acm.org>
Kevin <denk.kevin@web.de> Kevin <denk.kevin@web.de>
kevin.xu <cming.xu@gmail.com> kevin.xu <cming.xu@gmail.com>
KibGzr <kibgzr@gmail.com>
kiel barry <kiel.j.barry@gmail.com> kiel barry <kiel.j.barry@gmail.com>
kilic <onurkilic1004@gmail.com>
kimmylin <30611210+kimmylin@users.noreply.github.com> kimmylin <30611210+kimmylin@users.noreply.github.com>
Kitten King <53072918+kittenking@users.noreply.github.com> Kitten King <53072918+kittenking@users.noreply.github.com>
knarfeh <hejun1874@gmail.com> knarfeh <hejun1874@gmail.com>
Kobi Gurkan <kobigurk@gmail.com> Kobi Gurkan <kobigurk@gmail.com>
komika <komika@komika.org>
Konrad Feldmeier <konrad@brainbot.com> Konrad Feldmeier <konrad@brainbot.com>
Kris Shinn <raggamuffin.music@gmail.com> Kris Shinn <raggamuffin.music@gmail.com>
Kristofer Peterson <svenski123@users.noreply.github.com>
Kumar Anirudha <mail@anirudha.dev>
Kurkó Mihály <kurkomisi@users.noreply.github.com> Kurkó Mihály <kurkomisi@users.noreply.github.com>
Kushagra Sharma <ksharm01@gmail.com> Kushagra Sharma <ksharm01@gmail.com>
Kwuaint <34888408+kwuaint@users.noreply.github.com> Kwuaint <34888408+kwuaint@users.noreply.github.com>
Kyuntae Ethan Kim <ethan.kyuntae.kim@gmail.com> Kyuntae Ethan Kim <ethan.kyuntae.kim@gmail.com>
ledgerwatch <akhounov@gmail.com> Lee Bousfield <ljbousfield@gmail.com>
Lefteris Karapetsas <lefteris@refu.co> Lefteris Karapetsas <lefteris@refu.co>
Leif Jurvetson <leijurv@gmail.com> Leif Jurvetson <leijurv@gmail.com>
Leo Shklovskii <leo@thermopylae.net> Leo Shklovskii <leo@thermopylae.net>
LeoLiao <leofantast@gmail.com> LeoLiao <leofantast@gmail.com>
Lewis Marshall <lewis@lmars.net> Lewis Marshall <lewis@lmars.net>
lhendre <lhendre2@gmail.com> lhendre <lhendre2@gmail.com>
Liang Ma <liangma.ul@gmail.com> Li Dongwei <lidw1988@126.com>
Liang Ma <liangma@liangbit.com> Liang Ma <liangma@liangbit.com>
Liang ZOU <liang.d.zou@gmail.com> Liang ZOU <liang.d.zou@gmail.com>
libby kent <viskovitzzz@gmail.com>
libotony <liboliqi@gmail.com> libotony <liboliqi@gmail.com>
LieutenantRoger <dijsky_2015@hotmail.com>
ligi <ligi@ligi.de> ligi <ligi@ligi.de>
Lio李欧 <lionello@users.noreply.github.com> Lio李欧 <lionello@users.noreply.github.com>
lmittmann <lmittmann@users.noreply.github.com>
Lorenzo Manacorda <lorenzo@kinvolk.io> Lorenzo Manacorda <lorenzo@kinvolk.io>
Louis Holbrook <dev@holbrook.no> Louis Holbrook <dev@holbrook.no>
Luca Zeug <luclu@users.noreply.github.com> Luca Zeug <luclu@users.noreply.github.com>
Lucas Hendren <lhendre2@gmail.com>
lzhfromustc <43191155+lzhfromustc@users.noreply.github.com>
Magicking <s@6120.eu> Magicking <s@6120.eu>
manlio <manlio.poltronieri@gmail.com> manlio <manlio.poltronieri@gmail.com>
Maran Hidskes <maran.hidskes@gmail.com> Maran Hidskes <maran.hidskes@gmail.com>
Marek Kotewicz <marek.kotewicz@gmail.com> Marek Kotewicz <marek.kotewicz@gmail.com>
Mariano Cortesi <mcortesi@gmail.com>
Marius van der Wijden <m.vanderwijden@live.de> Marius van der Wijden <m.vanderwijden@live.de>
Mark <markya0616@gmail.com> Mark <markya0616@gmail.com>
Mark Rushakoff <mark.rushakoff@gmail.com> Mark Rushakoff <mark.rushakoff@gmail.com>
@ -218,108 +324,193 @@ mark.lin <mark@maicoin.com>
Martin Alex Philip Dawson <u1356770@gmail.com> Martin Alex Philip Dawson <u1356770@gmail.com>
Martin Holst Swende <martin@swende.se> Martin Holst Swende <martin@swende.se>
Martin Klepsch <martinklepsch@googlemail.com> Martin Klepsch <martinklepsch@googlemail.com>
Martin Lundfall <martin.lundfall@protonmail.com>
Martin Michlmayr <tbm@cyrius.com>
Martin Redmond <21436+reds@users.noreply.github.com>
Mason Fischer <mason@kissr.co>
Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com>
Mats Julian Olsen <mats@plysjbyen.net> Mats Julian Olsen <mats@plysjbyen.net>
Matt Garnett <14004106+lightclient@users.noreply.github.com>
Matt K <1036969+mkrump@users.noreply.github.com> Matt K <1036969+mkrump@users.noreply.github.com>
Matthew Di Ferrante <mattdf@users.noreply.github.com> Matthew Di Ferrante <mattdf@users.noreply.github.com>
Matthew Halpern <matthalp@gmail.com> Matthew Halpern <matthalp@gmail.com>
Matthew Halpern <matthalp@google.com>
Matthew Wampler-Doty <matthew.wampler.doty@gmail.com> Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
Max Sistemich <mafrasi2@googlemail.com> Max Sistemich <mafrasi2@googlemail.com>
Maxim Zhiburt <zhiburt@gmail.com>
Maximilian Meister <mmeister@suse.de> Maximilian Meister <mmeister@suse.de>
me020523 <me020523@gmail.com>
Melvin Junhee Woo <melvin.woo@groundx.xyz>
meowsbits <b5c6@protonmail.com>
Micah Zoltu <micah@zoltu.net> Micah Zoltu <micah@zoltu.net>
Michael Forney <mforney@mforney.org>
Michael Riabzev <michael@starkware.co>
Michael Ruminer <michael.ruminer+github@gmail.com> Michael Ruminer <michael.ruminer+github@gmail.com>
michael1011 <me@michael1011.at>
Miguel Mota <miguelmota2@gmail.com> Miguel Mota <miguelmota2@gmail.com>
Mike Burr <mburr@nightmare.com>
Mikhail Mikheev <mmvsha73@gmail.com>
milesvant <milesvant@gmail.com>
Miro <mirokuratczyk@users.noreply.github.com>
Miya Chen <miyatlchen@gmail.com> Miya Chen <miyatlchen@gmail.com>
Mohanson <mohanson@outlook.com> Mohanson <mohanson@outlook.com>
mr_franklin <mr_franklin@126.com> mr_franklin <mr_franklin@126.com>
Mudit Gupta <guptamudit@ymail.com>
Mymskmkt <1847234666@qq.com> Mymskmkt <1847234666@qq.com>
Nalin Bhardwaj <nalinbhardwaj@nibnalin.me> Nalin Bhardwaj <nalinbhardwaj@nibnalin.me>
Natsu Kagami <natsukagami@gmail.com>
Nchinda Nchinda <nchinda2@gmail.com> Nchinda Nchinda <nchinda2@gmail.com>
nebojsa94 <nebojsa94@users.noreply.github.com>
necaremus <necaremus@gmail.com> necaremus <necaremus@gmail.com>
nedifi <103940716+nedifi@users.noreply.github.com>
needkane <604476380@qq.com> needkane <604476380@qq.com>
Nguyen Kien Trung <trung.n.k@gmail.com> Nguyen Kien Trung <trung.n.k@gmail.com>
Nguyen Sy Thanh Son <thanhson1085@gmail.com> Nguyen Sy Thanh Son <thanhson1085@gmail.com>
Nic Jansma <nic@nicj.net>
Nick Dodson <silentcicero@outlook.com> Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net> Nick Johnson <arachnid@notdot.net>
Nicolas Feignon <nfeignon@gmail.com>
Nicolas Guillaume <gunicolas@sqli.com> Nicolas Guillaume <gunicolas@sqli.com>
Nikita Kozhemyakin <enginegl.ec@gmail.com>
Nikola Madjarevic <nikola.madjarevic@gmail.com>
Nilesh Trivedi <nilesh@hypertrack.io> Nilesh Trivedi <nilesh@hypertrack.io>
Nimrod Gutman <nimrod.gutman@gmail.com> Nimrod Gutman <nimrod.gutman@gmail.com>
Nishant Das <nishdas93@gmail.com>
njupt-moon <1015041018@njupt.edu.cn> njupt-moon <1015041018@njupt.edu.cn>
nkbai <nkbai@163.com> nkbai <nkbai@163.com>
noam-alchemy <76969113+noam-alchemy@users.noreply.github.com>
nobody <ddean2009@163.com> nobody <ddean2009@163.com>
Noman <noman@noman.land> Noman <noman@noman.land>
nujabes403 <nujabes403@gmail.com>
Nye Liu <nyet@nyet.org>
Oleg Kovalov <iamolegkovalov@gmail.com> Oleg Kovalov <iamolegkovalov@gmail.com>
Oli Bye <olibye@users.noreply.github.com> Oli Bye <olibye@users.noreply.github.com>
Oliver Tale-Yazdi <oliver@perun.network>
Olivier Hervieu <olivier.hervieu@gmail.com>
Or Neeman <oneeman@gmail.com>
Osoro Bironga <fanosoro@gmail.com>
Osuke <arget-fee.free.dgm@hotmail.co.jp> Osuke <arget-fee.free.dgm@hotmail.co.jp>
Pantelis Peslis <pespantelis@gmail.com>
Pascal Dierich <pascal@merkleplant.xyz>
Patrick O'Grady <prohb125@gmail.com>
Pau <pau@dabax.net>
Paul Berg <hello@paulrberg.com> Paul Berg <hello@paulrberg.com>
Paul Litvak <litvakpol@012.net.il> Paul Litvak <litvakpol@012.net.il>
Paul-Armand Verhaegen <paularmand.verhaegen@gmail.com>
Paulo L F Casaretto <pcasaretto@gmail.com> Paulo L F Casaretto <pcasaretto@gmail.com>
Paweł Bylica <chfast@gmail.com> Paweł Bylica <chfast@gmail.com>
Pedro Gomes <otherview@gmail.com>
Pedro Pombeiro <PombeirP@users.noreply.github.com> Pedro Pombeiro <PombeirP@users.noreply.github.com>
Peter Broadhurst <peter@themumbles.net> Peter Broadhurst <peter@themumbles.net>
peter cresswell <pcresswell@gmail.com>
Peter Pratscher <pratscher@gmail.com> Peter Pratscher <pratscher@gmail.com>
Peter Simard <petesimard56@gmail.com>
Petr Mikusek <petr@mikusek.info> Petr Mikusek <petr@mikusek.info>
Philip Schlump <pschlump@gmail.com> Philip Schlump <pschlump@gmail.com>
Pierre Neter <pierreneter@gmail.com> Pierre Neter <pierreneter@gmail.com>
Pierre R <p.rousset@gmail.com>
piersy <pierspowlesland@gmail.com>
PilkyuJung <anothel1@naver.com> PilkyuJung <anothel1@naver.com>
protolambda <proto@protolambda.com> Piotr Dyraga <piotr.dyraga@keep.network>
ploui <64719999+ploui@users.noreply.github.com>
Preston Van Loon <preston@prysmaticlabs.com>
Prince Sinha <sinhaprince013@gmail.com>
Péter Szilágyi <peterke@gmail.com> Péter Szilágyi <peterke@gmail.com>
qd-ethan <31876119+qdgogogo@users.noreply.github.com> qd-ethan <31876119+qdgogogo@users.noreply.github.com>
Qian Bin <cola.tin.com@gmail.com>
Quest Henkart <qhenkart@gmail.com>
Rachel Franks <nfranks@protonmail.com>
Rafael Matias <rafael@skyle.net>
Raghav Sood <raghavsood@gmail.com> Raghav Sood <raghavsood@gmail.com>
Ralph Caraveo <deckarep@gmail.com> Ralph Caraveo <deckarep@gmail.com>
Ralph Caraveo III <deckarep@gmail.com>
Ramesh Nair <ram@hiddentao.com> Ramesh Nair <ram@hiddentao.com>
rangzen <public@l-homme.com>
reinerRubin <tolstov.georgij@gmail.com> reinerRubin <tolstov.georgij@gmail.com>
Rene Lubov <41963722+renaynay@users.noreply.github.com>
rhaps107 <dod-source@yandex.ru> rhaps107 <dod-source@yandex.ru>
Ricardo Catalinas Jiménez <r@untroubled.be> Ricardo Catalinas Jiménez <r@untroubled.be>
Ricardo Domingos <ricardohsd@gmail.com> Ricardo Domingos <ricardohsd@gmail.com>
Richard Hart <richardhart92@gmail.com> Richard Hart <richardhart92@gmail.com>
Rick <rick.no@groundx.xyz>
RJ Catalano <catalanor0220@gmail.com> RJ Catalano <catalanor0220@gmail.com>
Rob <robert@rojotek.com> Rob <robert@rojotek.com>
Rob Mulholand <rmulholand@8thlight.com> Rob Mulholand <rmulholand@8thlight.com>
Robert Zaremba <robert.zaremba@scale-it.pl> Robert Zaremba <robert@zaremba.ch>
Roc Yu <rociiu0112@gmail.com> Roc Yu <rociiu0112@gmail.com>
Roman Mazalov <83914728+gopherxyz@users.noreply.github.com>
Ross <9055337+Chadsr@users.noreply.github.com>
Runchao Han <elvisage941102@gmail.com> Runchao Han <elvisage941102@gmail.com>
Russ Cox <rsc@golang.org> Russ Cox <rsc@golang.org>
Ryan Schneider <ryanleeschneider@gmail.com> Ryan Schneider <ryanleeschneider@gmail.com>
ryanc414 <ryan@tokencard.io>
Rémy Roy <remyroy@remyroy.com> Rémy Roy <remyroy@remyroy.com>
S. Matthew English <s-matthew-english@users.noreply.github.com> S. Matthew English <s-matthew-english@users.noreply.github.com>
salanfe <salanfe@users.noreply.github.com> salanfe <salanfe@users.noreply.github.com>
Sam <39165351+Xia-Sam@users.noreply.github.com>
Sammy Libre <7374093+sammy007@users.noreply.github.com>
Samuel Marks <samuelmarks@gmail.com> Samuel Marks <samuelmarks@gmail.com>
sanskarkhare <sanskarkhare47@gmail.com>
Sarlor <kinsleer@outlook.com> Sarlor <kinsleer@outlook.com>
Sasuke1964 <neilperry1964@gmail.com> Sasuke1964 <neilperry1964@gmail.com>
Satpal <28562234+SatpalSandhu61@users.noreply.github.com>
Saulius Grigaitis <saulius@necolt.com> Saulius Grigaitis <saulius@necolt.com>
Sean <darcys22@gmail.com> Sean <darcys22@gmail.com>
Sheldon <11510383@mail.sustc.edu.cn> Serhat Şevki Dinçer <jfcgauss@gmail.com>
Sheldon <374662347@qq.com> Shane Bammel <sjb933@gmail.com>
shawn <36943337+lxex@users.noreply.github.com>
shigeyuki azuchi <azuchi@chaintope.com>
Shihao Xia <charlesxsh@hotmail.com>
Shiming <codingmylife@gmail.com>
Shintaro Kaneko <kaneshin0120@gmail.com> Shintaro Kaneko <kaneshin0120@gmail.com>
shiqinfeng1 <150627601@qq.com>
Shuai Qi <qishuai231@gmail.com> Shuai Qi <qishuai231@gmail.com>
Shude Li <islishude@gmail.com>
Shunsuke Watanabe <ww.shunsuke@gmail.com> Shunsuke Watanabe <ww.shunsuke@gmail.com>
silence <wangsai.silence@qq.com> silence <wangsai.silence@qq.com>
Simon Jentzsch <simon@slock.it> Simon Jentzsch <simon@slock.it>
Sina Mahmoodi <1591639+s1na@users.noreply.github.com>
sixdays <lj491685571@126.com>
SjonHortensius <SjonHortensius@users.noreply.github.com>
Slava Karpenko <slavikus@gmail.com>
slumber1122 <slumber1122@gmail.com> slumber1122 <slumber1122@gmail.com>
Smilenator <yurivanenko@yandex.ru> Smilenator <yurivanenko@yandex.ru>
soc1c <soc1c@users.noreply.github.com>
Sorin Neacsu <sorin.neacsu@gmail.com> Sorin Neacsu <sorin.neacsu@gmail.com>
Sparty <vignesh.crysis@gmail.com>
Stein Dekker <dekker.stein@gmail.com> Stein Dekker <dekker.stein@gmail.com>
Steve Gattuso <steve@stevegattuso.me> Steve Gattuso <steve@stevegattuso.me>
Steve Ruckdashel <steve.ruckdashel@gmail.com> Steve Ruckdashel <steve.ruckdashel@gmail.com>
Steve Waldman <swaldman@mchange.com> Steve Waldman <swaldman@mchange.com>
Steven E. Harris <seh@panix.com>
Steven Roose <stevenroose@gmail.com> Steven Roose <stevenroose@gmail.com>
stompesi <stompesi@gmail.com> stompesi <stompesi@gmail.com>
stormpang <jialinpeng@vip.qq.com> stormpang <jialinpeng@vip.qq.com>
sunxiaojun2014 <sunxiaojun-xy@360.cn> sunxiaojun2014 <sunxiaojun-xy@360.cn>
Suriyaa Sundararuban <isc.suriyaa@gmail.com>
Sylvain Laurent <s@6120.eu>
Taeik Lim <sibera21@gmail.com>
tamirms <tamir@trello.com> tamirms <tamir@trello.com>
Tangui Clairet <tangui.clairet@gmail.com>
Tatsuya Shimoda <tacoo@users.noreply.github.com>
Taylor Gerring <taylor.gerring@gmail.com> Taylor Gerring <taylor.gerring@gmail.com>
TColl <38299499+TColl@users.noreply.github.com> TColl <38299499+TColl@users.noreply.github.com>
terasum <terasum@163.com> terasum <terasum@163.com>
tgyKomgo <52910426+tgyKomgo@users.noreply.github.com>
Thad Guidry <thadguidry@gmail.com>
Thomas Bocek <tom@tomp2p.net> Thomas Bocek <tom@tomp2p.net>
thomasmodeneis <thomas.modeneis@gmail.com> thomasmodeneis <thomas.modeneis@gmail.com>
thumb8432 <thumb8432@gmail.com> thumb8432 <thumb8432@gmail.com>
Ti Zhou <tizhou1986@gmail.com> Ti Zhou <tizhou1986@gmail.com>
tia-99 <67107070+tia-99@users.noreply.github.com>
Tim Cooijmans <timcooijmans@gmail.com>
Tobias Hildebrandt <79341166+tobias-hildebrandt@users.noreply.github.com>
Tosh Camille <tochecamille@gmail.com> Tosh Camille <tochecamille@gmail.com>
tsarpaul <Litvakpol@012.net.il> tsarpaul <Litvakpol@012.net.il>
Tyler Chambers <2775339+tylerchambers@users.noreply.github.com>
tzapu <alex@tzapu.com> tzapu <alex@tzapu.com>
ucwong <ucwong@126.com>
uji <49834542+uji@users.noreply.github.com>
ult-bobonovski <alex@ultiledger.io> ult-bobonovski <alex@ultiledger.io>
Valentin Trinqué <ValentinTrinque@users.noreply.github.com>
Valentin Wüstholz <wuestholz@gmail.com> Valentin Wüstholz <wuestholz@gmail.com>
Vedhavyas Singareddi <vedhavyas.singareddi@gmail.com> Vedhavyas Singareddi <vedhavyas.singareddi@gmail.com>
Victor Farazdagi <simple.square@gmail.com> Victor Farazdagi <simple.square@gmail.com>
@ -330,40 +521,71 @@ Ville Sundell <github@solarius.fi>
vim88 <vim88vim88@gmail.com> vim88 <vim88vim88@gmail.com>
Vincent G <caktux@gmail.com> Vincent G <caktux@gmail.com>
Vincent Serpoul <vincent@serpoul.com> Vincent Serpoul <vincent@serpoul.com>
Vinod Damle <vdamle@users.noreply.github.com>
Vitalik Buterin <v@buterin.com> Vitalik Buterin <v@buterin.com>
Vitaly Bogdanov <vsbogd@gmail.com> Vitaly Bogdanov <vsbogd@gmail.com>
Vitaly V <vvelikodny@gmail.com> Vitaly V <vvelikodny@gmail.com>
Vivek Anand <vivekanand1101@users.noreply.github.com> Vivek Anand <vivekanand1101@users.noreply.github.com>
Vlad <gluk256@gmail.com>
Vlad Bokov <razum2um@mail.ru> Vlad Bokov <razum2um@mail.ru>
Vlad Gluhovsky <gluk256@users.noreply.github.com> Vlad Gluhovsky <gluk256@gmail.com>
Ward Bradt <wardbradt5@gmail.com>
Water <44689567+codeoneline@users.noreply.github.com>
wbt <wbt@users.noreply.github.com>
weimumu <934657014@qq.com> weimumu <934657014@qq.com>
Wenbiao Zheng <delweng@gmail.com> Wenbiao Zheng <delweng@gmail.com>
Wenshao Zhong <wzhong20@uic.edu>
Will Villanueva <hello@willvillanueva.com>
William Morriss <wjmelements@gmail.com>
William Setzer <bootstrapsetzer@gmail.com> William Setzer <bootstrapsetzer@gmail.com>
williambannas <wrschwartz@wpi.edu> williambannas <wrschwartz@wpi.edu>
wuff1996 <33193253+wuff1996@users.noreply.github.com>
Wuxiang <wuxiangzhou2010@gmail.com> Wuxiang <wuxiangzhou2010@gmail.com>
Xiaobing Jiang <s7v7nislands@gmail.com>
xiekeyang <xiekeyang@users.noreply.github.com> xiekeyang <xiekeyang@users.noreply.github.com>
xincaosu <xincaosu@126.com> xincaosu <xincaosu@126.com>
xinluyin <31590468+xinluyin@users.noreply.github.com>
Xudong Liu <33193253+r1cs@users.noreply.github.com>
xwjack <XWJACK@users.noreply.github.com>
yahtoo <yahtoo.ma@gmail.com> yahtoo <yahtoo.ma@gmail.com>
Yang Hau <vulxj0j8j8@gmail.com>
YaoZengzeng <yaozengzeng@zju.edu.cn> YaoZengzeng <yaozengzeng@zju.edu.cn>
YH-Zhou <yanhong.zhou05@gmail.com> YH-Zhou <yanhong.zhou05@gmail.com>
Yihau Chen <a122092487@gmail.com>
Yohann Léon <sybiload@gmail.com> Yohann Léon <sybiload@gmail.com>
Yoichi Hirai <i@yoichihirai.com> Yoichi Hirai <i@yoichihirai.com>
Yole <007yuyue@gmail.com>
Yondon Fu <yondon.fu@gmail.com> Yondon Fu <yondon.fu@gmail.com>
YOSHIDA Masanori <masanori.yoshida@gmail.com> YOSHIDA Masanori <masanori.yoshida@gmail.com>
yoza <yoza.is12s@gmail.com> yoza <yoza.is12s@gmail.com>
yumiel yoomee1313 <yumiel.ko@groundx.xyz>
Yusup <awklsgrep@gmail.com> Yusup <awklsgrep@gmail.com>
yutianwu <wzxingbupt@gmail.com>
ywzqwwt <39263032+ywzqwwt@users.noreply.github.com>
zaccoding <zaccoding725@gmail.com>
Zach <zach.ramsay@gmail.com> Zach <zach.ramsay@gmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com>
zah <zahary@gmail.com> zah <zahary@gmail.com>
Zahoor Mohamed <zahoor@zahoor.in> Zahoor Mohamed <zahoor@zahoor.in>
Zak Cole <zak@beattiecole.com> Zak Cole <zak@beattiecole.com>
zcheng9 <zcheng9@hawk.iit.edu>
zer0to0ne <36526113+zer0to0ne@users.noreply.github.com> zer0to0ne <36526113+zer0to0ne@users.noreply.github.com>
zgfzgf <48779939+zgfzgf@users.noreply.github.com>
Zhang Zhuo <mycinbrin@gmail.com>
zhangsoledad <787953403@qq.com>
zhaochonghe <41711151+zhaochonghe@users.noreply.github.com>
Zhenguo Niu <Niu.ZGlinux@gmail.com> Zhenguo Niu <Niu.ZGlinux@gmail.com>
zhiqiangxu <652732310@qq.com>
Zhou Zhiyao <ZHOU0250@e.ntu.edu.sg>
Ziyuan Zhong <zzy.albert@163.com>
Zoe Nolan <github@zoenolan.org> Zoe Nolan <github@zoenolan.org>
Zou Guangxian <zouguangxian@gmail.com>
Zsolt Felföldi <zsfelfoldi@gmail.com> Zsolt Felföldi <zsfelfoldi@gmail.com>
Łukasz Kurowski <crackcomm@users.noreply.github.com> Łukasz Kurowski <crackcomm@users.noreply.github.com>
Łukasz Zimnoch <lukaszzimnoch1994@gmail.com>
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com> ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
Максим Чусовлянов <mchusovlianov@gmail.com> Максим Чусовлянов <mchusovlianov@gmail.com>
大彬 <hz_stb@163.com> 大彬 <hz_stb@163.com>
沉风 <myself659@users.noreply.github.com>
贺鹏飞 <hpf@hackerful.cn> 贺鹏飞 <hpf@hackerful.cn>
陈佳 <chenjiablog@gmail.com>
유용환 <33824408+eric-yoo@users.noreply.github.com> 유용환 <33824408+eric-yoo@users.noreply.github.com>

View File

@ -4,12 +4,17 @@ ARG VERSION=""
ARG BUILDNUM="" ARG BUILDNUM=""
# Build Geth in a stock Go builder container # Build Geth in a stock Go builder container
FROM golang:1.16-alpine as builder FROM golang:1.18-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git RUN apk add --no-cache gcc musl-dev linux-headers git
# Get dependencies - will also be cached if we won't change go.mod/go.sum
COPY go.mod /go-ethereum/
COPY go.sum /go-ethereum/
RUN cd /go-ethereum && go mod download
ADD . /go-ethereum ADD . /go-ethereum
RUN cd /go-ethereum && go run build/ci.go install ./cmd/geth RUN cd /go-ethereum && go run build/ci.go install -static ./cmd/geth
# Pull Geth into a second stage deploy alpine container # Pull Geth into a second stage deploy alpine container
FROM alpine:latest FROM alpine:latest

View File

@ -4,12 +4,17 @@ ARG VERSION=""
ARG BUILDNUM="" ARG BUILDNUM=""
# Build Geth in a stock Go builder container # Build Geth in a stock Go builder container
FROM golang:1.16-alpine as builder FROM golang:1.18-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git RUN apk add --no-cache gcc musl-dev linux-headers git
# Get dependencies - will also be cached if we won't change go.mod/go.sum
COPY go.mod /go-ethereum/
COPY go.sum /go-ethereum/
RUN cd /go-ethereum && go mod download
ADD . /go-ethereum ADD . /go-ethereum
RUN cd /go-ethereum && go run build/ci.go install RUN cd /go-ethereum && go run build/ci.go install -static
# Pull all binaries into a second stage deploy alpine container # Pull all binaries into a second stage deploy alpine container
FROM alpine:latest FROM alpine:latest

View File

@ -2,11 +2,7 @@
# with Go source code. If you know what GOPATH is then you probably # with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make. # don't need to bother with make.
.PHONY: geth android ios geth-cross evm all test clean .PHONY: geth android ios evm all test clean
.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
.PHONY: geth-windows geth-windows-386 geth-windows-amd64
GOBIN = ./build/bin GOBIN = ./build/bin
GO ?= latest GO ?= latest
@ -47,101 +43,8 @@ clean:
devtools: devtools:
env GOBIN= go install golang.org/x/tools/cmd/stringer@latest env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
env GOBIN= go install github.com/kevinburke/go-bindata/go-bindata@latest
env GOBIN= go install github.com/fjl/gencodec@latest env GOBIN= go install github.com/fjl/gencodec@latest
env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest
env GOBIN= go install ./cmd/abigen env GOBIN= go install ./cmd/abigen
@type "solc" 2> /dev/null || echo 'Please install solc' @type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc' @type "protoc" 2> /dev/null || echo 'Please install protoc'
# Cross Compilation Targets (xgo)
geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
@echo "Full cross compilation done:"
@ls -ld $(GOBIN)/geth-*
geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le
@echo "Linux cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-*
geth-linux-386:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
@echo "Linux 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep 386
geth-linux-amd64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
@echo "Linux amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep amd64
geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
@echo "Linux ARM cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm
geth-linux-arm-5:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
@echo "Linux ARMv5 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-5
geth-linux-arm-6:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
@echo "Linux ARMv6 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-6
geth-linux-arm-7:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
@echo "Linux ARMv7 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-7
geth-linux-arm64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
@echo "Linux ARM64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm64
geth-linux-mips:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips
geth-linux-mipsle:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPSle cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mipsle
geth-linux-mips64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64
geth-linux-mips64le:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64le cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
geth-darwin: geth-darwin-386 geth-darwin-amd64
@echo "Darwin cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-*
geth-darwin-386:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
@echo "Darwin 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep 386
geth-darwin-amd64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
@echo "Darwin amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep amd64
geth-windows: geth-windows-386 geth-windows-amd64
@echo "Windows cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-*
geth-windows-386:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
@echo "Windows 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep 386
geth-windows-amd64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
@echo "Windows amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep amd64

View File

@ -1,6 +1,6 @@
## Go Ethereum ## Go Ethereum
Official Golang implementation of the Ethereum protocol. Official Golang execution layer implementation of the Ethereum protocol.
[![API Reference]( [![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
@ -16,7 +16,7 @@ archives are published at https://geth.ethereum.org/downloads/.
For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth). For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth).
Building `geth` requires both a Go (version 1.14 or later) and a C compiler. You can install Building `geth` requires both a Go (version 1.16 or later) and a C compiler. You can install
them using your favourite package manager. Once the dependencies are installed, run them using your favourite package manager. Once the dependencies are installed, run
```shell ```shell
@ -39,10 +39,10 @@ directory.
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. | | **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. |
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. | | `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. | | `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. | | `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
## Running `geth` ## Running `geth`
@ -52,26 +52,42 @@ Going through all the possible command line flags is out of scope here (please c
but we've enumerated a few common parameter combos to get you up to speed quickly but we've enumerated a few common parameter combos to get you up to speed quickly
on how you can run your own `geth` instance. on how you can run your own `geth` instance.
### Hardware Requirements
Minimum:
* CPU with 2+ cores
* 4GB RAM
* 1TB free storage space to sync the Mainnet
* 8 MBit/sec download Internet service
Recommended:
* Fast CPU with 4+ cores
* 16GB+ RAM
* High-performance SSD with at least 1TB of free space
* 25+ MBit/sec download Internet service
### Full node on the main Ethereum network ### Full node on the main Ethereum network
By far the most common scenario is people wanting to simply interact with the Ethereum By far the most common scenario is people wanting to simply interact with the Ethereum
network: create accounts; transfer funds; deploy and interact with contracts. For this network: create accounts; transfer funds; deploy and interact with contracts. For this
particular use-case the user doesn't care about years-old historical data, so we can particular use case, the user doesn't care about years-old historical data, so we can
fast-sync quickly to the current state of the network. To do so: sync quickly to the current state of the network. To do so:
```shell ```shell
$ geth console $ geth console
``` ```
This command will: This command will:
* Start `geth` in fast sync mode (default, can be changed with the `--syncmode` flag), * Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag),
causing it to download more data in exchange for avoiding processing the entire history causing it to download more data in exchange for avoiding processing the entire history
of the Ethereum network, which is very CPU intensive. of the Ethereum network, which is very CPU intensive.
* Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), * Start the built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/en/) (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md)
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs), (note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
This tool is optional and if you leave it out you can always attach to an already running This tool is optional and if you leave it out you can always attach it to an already running
`geth` instance with `geth attach`. `geth` instance with `geth attach`.
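The same attach workflow is available from a program as well as from `geth attach`. Below is a minimal Go sketch using the `ethclient` package; the IPC path is an illustrative assumption (the default mainnet data directory on Linux for a user named alice), not something mandated by the README.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Assumed default IPC endpoint; adjust to your datadir.
	client, err := ethclient.Dial("/home/alice/.ethereum/geth.ipc")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Ask the running node for the latest header, much like `eth.getBlock("latest")`
	// in the JavaScript console.
	head, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest block:", head.Number)
}
```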
### A Full node on the Görli test network ### A Full node on the Görli test network
@ -86,12 +102,12 @@ the main network, but with play-Ether only.
$ geth --goerli console $ geth --goerli console
``` ```
The `console` subcommand has the exact same meaning as above and they are equally The `console` subcommand has the same meaning as above and is equally
useful on the testnet too. Please, see above for their explanations if you've skipped here. useful on the testnet too.
Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit: Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit:
* Instead of connecting the main Ethereum network, the client will connect to the Görli * Instead of connecting to the main Ethereum network, the client will connect to the Görli
test network, which uses different P2P bootnodes, different network IDs and genesis test network, which uses different P2P bootnodes, different network IDs and genesis
states. states.
* Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
@ -102,9 +118,9 @@ Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a
`geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by `geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by
this. this.
*Note: Although there are some internal protective measures to prevent transactions from *Note: Although some internal protective measures prevent transactions from
crossing over between the main network and test network, you should make sure to always crossing over between the main network and test network, you should always
use separate accounts for play-money and real-money. Unless you manually move use separate accounts for play and real money. Unless you manually move
accounts, `geth` will by default correctly separate the two networks and will not make any accounts, `geth` will by default correctly separate the two networks and will not make any
accounts available between them.* accounts available between them.*
@ -139,7 +155,7 @@ configuration file via:
$ geth --config /path/to/your_config.toml $ geth --config /path/to/your_config.toml
``` ```
To get an idea how the file should look like you can use the `dumpconfig` subcommand to To get an idea of how the file should look like you can use the `dumpconfig` subcommand to
export your existing configuration: export your existing configuration:
```shell ```shell
@ -159,20 +175,20 @@ docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
ethereum/client-go ethereum/client-go
``` ```
This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the This will start `geth` in snap-sync mode with a DB memory allowance of 1GB, as the
above command does. It will also create a persistent volume in your home directory for above command does. It will also create a persistent volume in your home directory for
saving your blockchain as well as map the default ports. There is also an `alpine` tag saving your blockchain as well as map the default ports. There is also an `alpine` tag
available for a slim version of the image. available for a slim version of the image.
Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not
accessible from the outside. accessible from the outside.
### Programmatically interfacing `geth` nodes ### Programmatically interfacing `geth` nodes
As a developer, sooner rather than later you'll want to start interacting with `geth` and the As a developer, sooner rather than later you'll want to start interacting with `geth` and the
Ethereum network via your own programs and not manually through the console. To aid Ethereum network via your own programs and not manually through the console. To aid
this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://eth.wiki/json-rpc/API) this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://ethereum.github.io/execution-apis/api-documentation/)
and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)). and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)).
These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based
platforms, and named pipes on Windows). platforms, and named pipes on Windows).
@ -193,9 +209,9 @@ HTTP based JSON-RPC API options:
* `--ws.addr` WS-RPC server listening interface (default: `localhost`) * `--ws.addr` WS-RPC server listening interface (default: `localhost`)
* `--ws.port` WS-RPC server listening port (default: `8546`) * `--ws.port` WS-RPC server listening port (default: `8546`)
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`) * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept websockets requests * `--ws.origins` Origins from which to accept WebSocket requests
* `--ipcdisable` Disable the IPC-RPC server * `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`) * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
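As a rough illustration of driving these endpoints from a program, here is a small Go sketch using the low-level `rpc` package. It assumes `geth` was started with `--http` on the default port 8545; the method called, `web3_clientVersion`, is just an example of a standard JSON-RPC request.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumes the HTTP JSON-RPC server is enabled on the default interface/port.
	client, err := rpc.DialContext(context.Background(), "http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var version string
	if err := client.CallContext(context.Background(), &version, "web3_clientVersion"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("client version:", version)
}
```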
@ -231,7 +247,8 @@ aware of and agree upon. This consists of a small JSON file (e.g. call it `genes
"constantinopleBlock": 0, "constantinopleBlock": 0,
"petersburgBlock": 0, "petersburgBlock": 0,
"istanbulBlock": 0, "istanbulBlock": 0,
"berlinBlock": 0 "berlinBlock": 0,
"londonBlock": 0
}, },
"alloc": {}, "alloc": {},
"coinbase": "0x0000000000000000000000000000000000000000", "coinbase": "0x0000000000000000000000000000000000000000",
@ -280,7 +297,7 @@ $ bootnode --genkey=boot.key
$ bootnode --nodekey=boot.key $ bootnode --nodekey=boot.key
``` ```
With the bootnode online, it will display an [`enode` URL](https://eth.wiki/en/fundamentals/enode-url-format) With the bootnode online, it will display an [`enode` URL](https://ethereum.org/en/developers/docs/networking-layer/network-addresses/#enode)
that other nodes can use to connect to it and exchange peer information. Make sure to that other nodes can use to connect to it and exchange peer information. Make sure to
replace the displayed IP address information (most probably `[::]`) with your externally replace the displayed IP address information (most probably `[::]`) with your externally
accessible IP to get the actual `enode` URL. accessible IP to get the actual `enode` URL.
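For readers who want to see how such URLs are built and parsed, here is a small Go sketch using the `p2p/enode` package. The key is a throwaway and the IP is a documentation address, not a real bootnode; a real deployment would derive the URL from `boot.key` as described above.

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	// Generate a throwaway node key just to produce a syntactically valid URL.
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder externally reachable IP and the default listening port.
	n := enode.NewV4(&key.PublicKey, net.ParseIP("203.0.113.10"), 30303, 30303)
	fmt.Println("enode URL:", n.URLv4())

	// Round-trip: parse it back the way geth does for --bootnodes.
	parsed, err := enode.ParseV4(n.URLv4())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("node id:", parsed.ID())
}
```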
@ -310,7 +327,7 @@ requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such
setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/) setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
and the [ethminer](https://github.com/ethereum-mining/ethminer) repository. and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.
In a private network setting, however a single CPU miner instance is more than enough for In a private network setting, however, a single CPU miner instance is more than enough for
practical purposes as it can produce a stable stream of blocks at the correct intervals practical purposes as it can produce a stable stream of blocks at the correct intervals
without needing heavy resources (consider running on a single thread, no need for multiple without needing heavy resources (consider running on a single thread, no need for multiple
ones either). To start a `geth` instance for mining, run it with all your usual flags, extended ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
@ -327,7 +344,7 @@ transactions are accepted at (`--miner.gasprice`).
## Contribution ## Contribution
Thank you for considering to help out with the source code! We welcome contributions Thank you for considering helping out with the source code! We welcome contributions
from anyone on the internet, and are grateful for even the smallest of fixes! from anyone on the internet, and are grateful for even the smallest of fixes!
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
@ -357,6 +374,6 @@ The go-ethereum library (i.e. all code outside of the `cmd` directory) is licens
[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), [GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html),
also included in our repository in the `COPYING.LESSER` file. also included in our repository in the `COPYING.LESSER` file.
The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the The go-ethereum binaries (i.e. all code inside of the `cmd` directory) are licensed under the
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also [GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also
included in our repository in the `COPYING` file. included in our repository in the `COPYING` file.

View File

@ -12,12 +12,14 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
| ------- | ------- | ----------- | | ------- | ------- | ----------- |
| `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) | | `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) |
| `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) | | `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) |
| `Discv5` | 20191015 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2019-10-15_Discv5_audit_LeastAuthority.pdf) |
| `Discv5` | 20200124 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2020-01-24_DiscV5_audit_Cure53.pdf) |
## Reporting a Vulnerability ## Reporting a Vulnerability
**Please do not file a public ticket** mentioning the vulnerability. **Please do not file a public ticket** mentioning the vulnerability.
To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities. To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.
Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number. Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
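For the curious, the same feed can be fetched manually. The sketch below is a rough Go example; the struct covers only a guessed subset of the published schema, so check the actual `vulnerabilities.json` for the full set of fields.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// vulnerability mirrors an assumed subset of the fields in vulnerabilities.json.
type vulnerability struct {
	Name     string `json:"name"`
	Summary  string `json:"summary"`
	Severity string `json:"severity"`
	Fixed    string `json:"fixed"`
}

func main() {
	resp, err := http.Get("https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var vulns []vulnerability
	if err := json.NewDecoder(resp.Body).Decode(&vulns); err != nil {
		log.Fatal(err)
	}
	for _, v := range vulns {
		fmt.Printf("%s (%s): %s\n", v.Name, v.Severity, v.Summary)
	}
}
```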
@ -27,92 +29,147 @@ Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`
``` ```
-----BEGIN PGP PUBLIC KEY BLOCK----- -----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1 Version: SKS 1.1.6
Comment: Hostname: pgp.mit.edu
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1
neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9 82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM
L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q
m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac
fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd 48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y
EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7 PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho
M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F
QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c 2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ 8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG
EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa
tDlFdGhlcmV1bSBGb3VuZGF0aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0 FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b
aGVyZXVtLm9yZz6JAj4EEwECACgCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheA mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28
BQJaCWH6BQkFo2BYAAoJEOiNMzT6X2oK+DEP/3H6dxkm0hvHZKoHLVuuxcu3EHYo rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB
k5sd3MMWPrZSN8qzZnY7ayEDMxnarWOizc+2jfOxfJlzX/g8lR1/fsHdWPFPhPoV Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV
Qk8ygrHn1H8U8+rpw/U03BqmqHpYCDzJ+CIis9UWROniqXw1nuqu/FtWOsdWxNKh Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD
jUo6k/0EsaXsxRPzgJv7fEUcVcQ7as/C3x9sy3muc2gvgA4/BKoGPb1/U0GuA8lV CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt
fDIDshAggmnSUAg+TuYSAAdoFQ1sKwFMPigcLJF2eyKuK3iUyixJrec/c4LSf3wA 09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX
cGghbeuqI8INP0Y2zvXDQN2cByxsFAuoZG+m0cyKGaDH2MVUvOKKYqn/03qvrf15 QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt
AWAsW0l0yQwOTCo3FbsNzemClm5Bj/xH0E4XuwXwChcMCMOWJrFoxyvCEI+keoQc Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp
c08/a8/MtS7vBAABXwOziSmm6CNqmzpWrh/fDrjlJlba9U3MxzvqU3IFlTdMratv XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo
6V+SgX+L25lCzW4NxxUavoB8fAlvo8lxpHKo24FP+RcLQ8XqkU3RiUsgRjQRFOqQ AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2
TaJcsp8mimmiYyf24mNu6b48pi+a5c/eQR9w59emeEUZqsJU+nqv8BWIIp7o4Agh D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl
NYnKjkhPlY5e1fLVfAHIADZFynWwRPkPMJSrBiP5EtcOFxQGHGjRxU/KjXkvE0hV s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI
xYb1PB8pWMTu/beeiQI+BBMBAgAoBQJYJd7YAhsDBQkB4TOABgsJCAcDAgYVCAIJ ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85
CgsEFgIDAQIeAQIXgAAKCRDojTM0+l9qCplDD/9IZ2i+m1cnqQKtiyHbyFGx32oL kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j
fzqPylX2bOG5DPsSTorSUdJMGVfT04oVxXc4S/2DVnNvi7RAbSiLapCWSplgtBOj dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu
j1xlblOoXxT3m7s1XHGCX5tENxI9fVSSPVKJn+fQaWpPB2MhBA+1lUI6GJ+11T7K O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01
J8LrP/fiw1/nOb7rW61HW44Gtyox23sA/d1+DsFVaF8hxJlNj5coPKr8xWzQ8pQl ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky
juzdjHDukjevuw4rRmRq9vozvj9keEU9XJ5dldyEVXFmdDk7KT0p0Rla9nxYhzf/ fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc
r/Bv8Bzy0HCWRb2D31BjXXGG05oVnYmNGxGFxYja4MwgrMmne3ilEVjfUJsapsqi T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S
w41BAyQgIdfREulYN7ahsF5PrjVAqBd9IGtE8ULelF2SQxEBQBngEkP0ahP6tRAL V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL
i7/CBjPKOyKijtqVny7qrGOnU2ygcA88/WDibexDhrjz0Gx8WmErU7rIWZiZ5u4Y CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf
vJYVRo0+6rBCXRPeSJfiP5h1p17Anr2l42boAYslfcrzquB8MHtrNcyn650OLtHG Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm
nbxgIdniKrpuzGN6Opw+O2id2JhD1/1p4SOemwAmthplr1MIyOHNP3q93rEj2J7h 5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp
5zPS/AJuKkMDFUpslPNLQjCOwPXtdzL7/kUZGBSyez1T3TaW1uY6l9XaJJRaSn+v 7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm
1zPgfp4GJ3lPs4AlAbQ0RXRoZXJldW0gRm91bmRhdGlvbiBCdWcgQm91bnR5IDxi otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS
b3VudHlAZXRoZXJldW0ub3JnPokCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF
AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagoENg/+LnSaVeMxiGVtcjWl aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr
b7Xd73yrEy4uxiESS1AalW9mMf7oZzfI05f7QIQlaLAkNac74vZDJbPKjtb7tpMO A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz
RFhRZMCveq6CPKU6pd1SI8IUVUKwpEe6AJP3lHdVP57dquieFE2HlYKm6uHbCGWU /WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF
0cjyTA+uu2KbgCHGmofsPY/xOcZLGEHTHqa5w60JJAQm+BSDKnw8wTyrxGvA3EK/ rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt
ePSvOZMYa+iw6vYuZeBIMbdiXR/A2keBi3GuvqB8tDMj7P22TrH5mVDm3zNqGYD6 BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt
amDPeiWp4cztY3aZyLcgYotqXPpDceZzDn+HopBPzAb/llCdE7bVswKRhphVMw4b lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0
bhL0R/TQY7Sf6TK2LKSBrjv0DWOSijikE71SJcBnJvHU7EpKrQQ0lMGclm3ynyji GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj
Nf0YTPXQt4I+fwTmOew2GFeK3UytNWbWI7oXX7Nm4bj9bhf3IJ0kmZb/Gs73+xII GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB
e7Rz52Mby436tWyQIQiF9ITYNGvNf53TwBBZMn0pKPiTyr3Ur7FHEotkEOFNh1// 6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E
4zQY10XxuBdLrYGyZ4V8xHJM+oKre8Eg2R9qHXVbjvErHE+7CvgnV7YUip0criPr qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA
BlKRvuoJaSliH2JFhSjWVrkPmFGrWN0BAx10yIqMnEplfKeHf4P9Elek3oInS8WP zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA
G1zJG6s/t5+hQK0X37+TB+6rd3GJAj4EEwECACgFAlgl4TsCGwMFCQHhM4AGCwkI nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP
BwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOiNMzT6X2oKzf8P/iIKd77WHTbp4pMN 5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN
8h52HyZJtDJmjA1DPZrbGl1TesW/Z9uTd12txlgqZnbG2GfN9+LSP6EOPzR6v2xC WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2
OVhR+RdWhZDJJuQCVS7lJIqQrZgmeTZG0TyQPZdLjVFBOrrhVwYX+HXbu429IzHr 8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+
URf5InyR1QgqOXyElDYS6e28HFqvaoA0DWTWDDqOLPVl+U5fuceIE2XXdv3AGLeP N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8
Yf8J5MPobjPiZtBqI6S6iENY2Yn35qLX+axeC/iYSCHVtFuCCIdb/QYR1ZZV8Ps/ c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT
aI9DwC7LU+YfPw7iqCIoqxSeA3o1PORkdSigEg3jtfRv5UqVo9a0oBb9jdoADsat wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl
F/gW0E7mto3XGOiaR0eB9SSdsM3x7Bz4A0HIGNaxpZo1RWqlO91leP4c13Px7ISv D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem
5OGXfLg+M8qb+qxbGd1HpitGi9s1y1aVfEj1kOtZ0tN8eu+Upg5WKwPNBDX3ar7J J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf
9NCULgVSL+E79FG+zXw62gxiQrLfKzm4wU/9L5wVkwQnm29hLJ0tokrSBZFnc/1l Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw
7OC+GM63tYicKkY4rqmoWUeYx7IwFH9mtDtvR1RxO85RbQhZizwpZpdpRkH0DqZu NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL
ZJRmRa5r7rPqmfa7d+VIFhz2Xs8pJMLVqxTsLKcLglmjw7aOrYG0SWeH7YraXWGD QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V
N3SlvSBiVwcK7QUKzLLvpadLwxfsuQINBFgl3tgBEACbgq6HTN5gEBi0lkD/MafI Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye
nmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4hYontkMaKRlCg2Rvgjvk3Zve0 uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7
PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT19BdeAQRFvcfd+8w8 TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc
f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj26bf+2+1 Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6 AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66i diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC
PsR99MQ7FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF
4tGkHl08KZ2N9o6GrfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8gr E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA
eW8xB4zuf9Mkuou+RHNmo8PebHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0 B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU
VRxdPImKun+4LOXbfOxArOSkY6i35+gsgkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9 C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7
IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/bM1ACUtipMiIVeUs2uFiRjpz BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1
A1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJaCWIIBQkFo2BYAAoJ TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ
EOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg3IHMGxDM SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg
b/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8 CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0I HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
Q1UKKXvzZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0 AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR
K9lneidcqtBDvlggJTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0T 0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19
NOOE8fXlvu8iuIAMBSDL9ep6sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O
5MTi0MDRNTij431kn8T/D0LCgmoUmYYMBgbwFhXr67axPZlKjrqR0z3F/Elv0ZPP wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0
cVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1qScl9HiMxjt/H6aPastH63/7w OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR
cN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4/Lih6Z1TlwcFVap+ WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1
cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1pM6AOQPpZ EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG
85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4 jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M
=r6KK Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC
-----END PGP PUBLIC KEY BLOCK----- MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh
ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX
FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm
/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+
Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6
DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT
1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655
9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU
+2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1
qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4
OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN
BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h
YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1
9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj
26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7
FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G
rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe
bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs
gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/
bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa
CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg
3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz
ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg
JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6
sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM
BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q
Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4
/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p
M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl
BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q
1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs
6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l
LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3
yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd
cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle
5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX
GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1
rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR
XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa
QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc
mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB
8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy
I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro
yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh
ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz
i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP
+m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB
402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur
epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx
PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano=
=arte
-----END PGP PUBLIC KEY BLOCK-----
``` ```

View File

@ -34,6 +34,7 @@ type ABI struct {
Constructor Method Constructor Method
Methods map[string]Method Methods map[string]Method
Events map[string]Event Events map[string]Event
Errors map[string]Error
// Additional "special" functions introduced in solidity v0.6.0. // Additional "special" functions introduced in solidity v0.6.0.
// It's separated from the original default fallback. Each contract // It's separated from the original default fallback. Each contract
@ -94,7 +95,7 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
args = event.Inputs args = event.Inputs
} }
if args == nil { if args == nil {
return nil, errors.New("abi: could not locate named method or event") return nil, fmt.Errorf("abi: could not locate named method or event: %s", name)
} }
return args, nil return args, nil
} }
@ -157,12 +158,13 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
} }
abi.Methods = make(map[string]Method) abi.Methods = make(map[string]Method)
abi.Events = make(map[string]Event) abi.Events = make(map[string]Event)
abi.Errors = make(map[string]Error)
for _, field := range fields { for _, field := range fields {
switch field.Type { switch field.Type {
case "constructor": case "constructor":
abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil) abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
case "function": case "function":
name := abi.overloadedMethodName(field.Name) name := ResolveNameConflict(field.Name, func(s string) bool { _, ok := abi.Methods[s]; return ok })
abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs) abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
case "fallback": case "fallback":
// New introduced function type in v0.6.0, check more detail // New introduced function type in v0.6.0, check more detail
@ -182,8 +184,12 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
} }
abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil) abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "event": case "event":
name := abi.overloadedEventName(field.Name) name := ResolveNameConflict(field.Name, func(s string) bool { _, ok := abi.Events[s]; return ok })
abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs) abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
case "error":
// Errors cannot be overloaded or overridden but are inherited,
// no need to resolve the name conflict here.
abi.Errors[field.Name] = NewError(field.Name, field.Inputs)
default: default:
return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name) return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
} }
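A short usage sketch of the `Errors` map introduced above: the ABI fragment is hand-written for illustration and declares a Solidity custom error whose signature and selector can then be read back from the parsed ABI.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Illustrative ABI declaring a custom error.
	const def = `[{"type":"error","name":"InsufficientBalance","inputs":[
		{"name":"available","type":"uint256"},
		{"name":"required","type":"uint256"}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		log.Fatal(err)
	}
	e := parsed.Errors["InsufficientBalance"]
	fmt.Println("signature:", e.Sig)           // InsufficientBalance(uint256,uint256)
	fmt.Println("selector:", e.ID.Hex()[:10])  // first four bytes of keccak256(signature)
}
```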
@ -191,36 +197,6 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// overloadedMethodName returns the next available name for a given function.
// Needed since solidity allows for function overload.
//
// e.g. if the abi contains Methods send, send1
// overloadedMethodName would return send2 for input send.
func (abi *ABI) overloadedMethodName(rawName string) string {
name := rawName
_, ok := abi.Methods[name]
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
_, ok = abi.Methods[name]
}
return name
}
// overloadedEventName returns the next available name for a given event.
// Needed since solidity allows for event overload.
//
// e.g. if the abi contains events received, received1
// overloadedEventName would return received2 for input received.
func (abi *ABI) overloadedEventName(rawName string) string {
name := rawName
_, ok := abi.Events[name]
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
_, ok = abi.Events[name]
}
return name
}
// MethodById looks up a method by the 4-byte id, // MethodById looks up a method by the 4-byte id,
// returns nil if none found. // returns nil if none found.
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) { func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {

View File

@ -18,6 +18,7 @@ package abi
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"reflect" "reflect"
"strings" "strings"
@ -78,16 +79,10 @@ func (arguments Arguments) isTuple() bool {
// Unpack performs the operation hexdata -> Go format. // Unpack performs the operation hexdata -> Go format.
func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) { func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(data) == 0 { if len(data) == 0 {
if len(arguments) != 0 { if len(arguments.NonIndexed()) != 0 {
return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
} }
// Nothing to unmarshal, return default variables return make([]interface{}, 0), nil
nonIndexedArgs := arguments.NonIndexed()
defaultVars := make([]interface{}, len(nonIndexedArgs))
for index, arg := range nonIndexedArgs {
defaultVars[index] = reflect.New(arg.Type.GetType())
}
return defaultVars, nil
} }
return arguments.UnpackValues(data) return arguments.UnpackValues(data)
} }
@ -96,11 +91,11 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error { func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
// Make sure map is not nil // Make sure map is not nil
if v == nil { if v == nil {
return fmt.Errorf("abi: cannot unpack into a nil map") return errors.New("abi: cannot unpack into a nil map")
} }
if len(data) == 0 { if len(data) == 0 {
if len(arguments) != 0 { if len(arguments.NonIndexed()) != 0 {
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") return errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
} }
return nil // Nothing to unmarshal, return return nil // Nothing to unmarshal, return
} }
@ -121,8 +116,8 @@ func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v) return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
} }
if len(values) == 0 { if len(values) == 0 {
if len(arguments) != 0 { if len(arguments.NonIndexed()) != 0 {
return fmt.Errorf("abi: attempting to copy no values while %d arguments are expected", len(arguments)) return errors.New("abi: attempting to copy no values while arguments are expected")
} }
return nil // Nothing to copy, return return nil // Nothing to copy, return
} }
@ -137,7 +132,7 @@ func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{
dst := reflect.ValueOf(v).Elem() dst := reflect.ValueOf(v).Elem()
src := reflect.ValueOf(marshalledValues) src := reflect.ValueOf(marshalledValues)
if dst.Kind() == reflect.Struct && src.Kind() != reflect.Struct { if dst.Kind() == reflect.Struct {
return set(dst.Field(0), src) return set(dst.Field(0), src)
} }
return set(dst, src) return set(dst, src)
@ -192,6 +187,9 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
virtualArgs := 0 virtualArgs := 0
for index, arg := range nonIndexedArgs { for index, arg := range nonIndexedArgs {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data) marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
if err != nil {
return nil, err
}
if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) { if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
// If we have a static array, like [3]uint256, these are coded as // If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256. // just like uint256,uint256,uint256.
@ -209,9 +207,6 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
// coded as just like uint256,bool,uint256 // coded as just like uint256,bool,uint256
virtualArgs += getTypeSize(arg.Type)/32 - 1 virtualArgs += getTypeSize(arg.Type)/32 - 1
} }
if err != nil {
return nil, err
}
retval = append(retval, marshalledValue) retval = append(retval, marshalledValue)
} }
return retval, nil return retval, nil
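A small sketch of the static-array layout this loop accounts for: a `uint256[3]` is encoded inline as three consecutive 32-byte words, so a `bool` that follows it lands in the fourth word rather than the second. The values below are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	arrTy, err := abi.NewType("uint256[3]", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	boolTy, err := abi.NewType("bool", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	args := abi.Arguments{{Name: "values", Type: arrTy}, {Name: "ok", Type: boolTy}}

	data, err := args.Pack([3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, true)
	if err != nil {
		log.Fatal(err)
	}
	// The static array occupies three words, the bool one more.
	fmt.Println("encoded words:", len(data)/32) // 4

	out, err := args.UnpackValues(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out[0], out[1])
}
```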

View File

@ -17,10 +17,10 @@
package bind package bind
import ( import (
"context"
"crypto/ecdsa" "crypto/ecdsa"
"errors" "errors"
"io" "io"
"io/ioutil"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
@ -44,7 +44,7 @@ var ErrNotAuthorized = errors.New("not authorized to sign this account")
// Deprecated: Use NewTransactorWithChainID instead. // Deprecated: Use NewTransactorWithChainID instead.
func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) {
log.Warn("WARNING: NewTransactor has been deprecated in favour of NewTransactorWithChainID") log.Warn("WARNING: NewTransactor has been deprecated in favour of NewTransactorWithChainID")
json, err := ioutil.ReadAll(keyin) json, err := io.ReadAll(keyin)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -74,6 +74,7 @@ func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account
} }
return tx.WithSignature(signer, signature) return tx.WithSignature(signer, signature)
}, },
Context: context.Background(),
}, nil }, nil
} }
@ -97,13 +98,14 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts {
} }
return tx.WithSignature(signer, signature) return tx.WithSignature(signer, signature)
}, },
Context: context.Background(),
} }
} }
// NewTransactorWithChainID is a utility method to easily create a transaction signer from // NewTransactorWithChainID is a utility method to easily create a transaction signer from
// an encrypted json key stream and the associated passphrase. // an encrypted json key stream and the associated passphrase.
func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.Int) (*TransactOpts, error) { func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.Int) (*TransactOpts, error) {
json, err := ioutil.ReadAll(keyin) json, err := io.ReadAll(keyin)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -133,6 +135,7 @@ func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accou
} }
return tx.WithSignature(signer, signature) return tx.WithSignature(signer, signature)
}, },
Context: context.Background(),
}, nil }, nil
} }
@ -156,6 +159,7 @@ func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*Tr
} }
return tx.WithSignature(signer, signature) return tx.WithSignature(signer, signature)
}, },
Context: context.Background(),
}, nil }, nil
} }
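A minimal usage sketch of the chain-ID-aware constructor, using a throwaway key and an assumed private-network chain ID of 1337; real code would load a key from a keystore and use the target chain's actual ID. It also shows that the returned options now carry a non-nil `Context`, as added in this change.

```go
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Throwaway key for illustration only.
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	// 1337 is a common private-network default; adjust for your chain.
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("transactor address:", auth.From)
	fmt.Println("context set:", auth.Context != nil)
}
```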
@ -170,5 +174,6 @@ func NewClefTransactor(clef *external.ExternalSigner, account accounts.Account)
} }
return clef.SignTx(account, transaction, nil) // Clef enforces its own chain id return clef.SignTx(account, transaction, nil) // Clef enforces its own chain id
}, },
Context: context.Background(),
} }
} }

View File

@ -21,6 +21,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
"strings"
"sync"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi"
@ -30,6 +32,8 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
) )
const basefeeWiggleMultiplier = 2
// SignerFn is a signer function callback when a contract requires a method to // SignerFn is a signer function callback when a contract requires a method to
// sign the transaction before submission. // sign the transaction before submission.
type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error) type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error)
@ -76,6 +80,29 @@ type WatchOpts struct {
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout) Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
} }
// MetaData collects all metadata for a bound contract.
type MetaData struct {
mu sync.Mutex
Sigs map[string]string
Bin string
ABI string
ab *abi.ABI
}
func (m *MetaData) GetAbi() (*abi.ABI, error) {
m.mu.Lock()
defer m.mu.Unlock()
if m.ab != nil {
return m.ab, nil
}
if parsed, err := abi.JSON(strings.NewReader(m.ABI)); err != nil {
return nil, err
} else {
m.ab = &parsed
}
return m.ab, nil
}
// BoundContract is the base wrapper object that reflects a contract on the // BoundContract is the base wrapper object that reflects a contract on the
// Ethereum network. It contains a collection of methods that are used by the // Ethereum network. It contains a collection of methods that are used by the
// higher level contract bindings to operate. // higher level contract bindings to operate.
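A usage sketch of the lazily parsed metadata added above: in generated bindings the `MetaData` value comes from `abigen`; the ABI string here is a hand-written stand-in. The first call parses and caches the ABI, subsequent calls return the cached value.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
)

// Stand-in for an abigen-generated FooMetaData value.
var md = &bind.MetaData{
	ABI: `[{"type":"function","name":"retrieve","stateMutability":"view","inputs":[],"outputs":[{"name":"","type":"uint256"}]}]`,
}

func main() {
	parsed, err := md.GetAbi() // parsed once, cached for later calls
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("method:", parsed.Methods["retrieve"].Sig)
}
```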
@ -146,7 +173,10 @@ func (c *BoundContract) Call(opts *CallOpts, results *[]interface{}, method stri
return ErrNoPendingState return ErrNoPendingState
} }
output, err = pb.PendingCallContract(ctx, msg) output, err = pb.PendingCallContract(ctx, msg)
if err == nil && len(output) == 0 { if err != nil {
return err
}
if len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise. // Make sure we have a contract to operate on, and bail out otherwise.
if code, err = pb.PendingCodeAt(ctx, c.address); err != nil { if code, err = pb.PendingCodeAt(ctx, c.address); err != nil {
return err return err
@ -206,108 +236,158 @@ func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error)
return c.transact(opts, &c.address, nil) return c.transact(opts, &c.address, nil)
} }
// transact executes an actual transaction invocation, first deriving any missing func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Address, input []byte, head *types.Header) (*types.Transaction, error) {
// authorization fields, and then scheduling the transaction for execution. // Normalize value
func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
var err error
// Ensure a valid value field and resolve the account nonce
value := opts.Value value := opts.Value
if value == nil { if value == nil {
value = new(big.Int) value = new(big.Int)
} }
var nonce uint64 // Estimate TipCap
if opts.Nonce == nil { gasTipCap := opts.GasTipCap
nonce, err = c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From) if gasTipCap == nil {
if err != nil {
return nil, fmt.Errorf("failed to retrieve account nonce: %v", err)
}
} else {
nonce = opts.Nonce.Uint64()
}
// Figure out reasonable gas price values
if opts.GasPrice != nil && (opts.GasFeeCap != nil || opts.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
head, err := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil)
if err != nil {
return nil, err
}
if head.BaseFee != nil && opts.GasPrice == nil {
if opts.GasTipCap == nil {
tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context)) tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context))
if err != nil { if err != nil {
return nil, err return nil, err
} }
opts.GasTipCap = tip gasTipCap = tip
} }
if opts.GasFeeCap == nil { // Estimate FeeCap
gasFeeCap := new(big.Int).Add( gasFeeCap := opts.GasFeeCap
opts.GasTipCap, if gasFeeCap == nil {
new(big.Int).Mul(head.BaseFee, big.NewInt(2)), gasFeeCap = new(big.Int).Add(
gasTipCap,
new(big.Int).Mul(head.BaseFee, big.NewInt(basefeeWiggleMultiplier)),
) )
opts.GasFeeCap = gasFeeCap
} }
if opts.GasFeeCap.Cmp(opts.GasTipCap) < 0 { if gasFeeCap.Cmp(gasTipCap) < 0 {
return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", opts.GasFeeCap, opts.GasTipCap) return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", gasFeeCap, gasTipCap)
} }
} else { // Estimate GasLimit
gasLimit := opts.GasLimit
if opts.GasLimit == 0 {
var err error
gasLimit, err = c.estimateGasLimit(opts, contract, input, nil, gasTipCap, gasFeeCap, value)
if err != nil {
return nil, err
}
}
// create the transaction
nonce, err := c.getNonce(opts)
if err != nil {
return nil, err
}
baseTx := &types.DynamicFeeTx{
To: contract,
Nonce: nonce,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
Gas: gasLimit,
Value: value,
Data: input,
}
return types.NewTx(baseTx), nil
}
func (c *BoundContract) createLegacyTx(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
if opts.GasFeeCap != nil || opts.GasTipCap != nil {
return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet")
}
// Normalize value
value := opts.Value
if value == nil {
value = new(big.Int)
}
// Estimate GasPrice
gasPrice := opts.GasPrice
if gasPrice == nil {
price, err := c.transactor.SuggestGasPrice(ensureContext(opts.Context))
if err != nil {
return nil, err
}
gasPrice = price
}
// Estimate GasLimit
gasLimit := opts.GasLimit
if opts.GasLimit == 0 {
var err error
gasLimit, err = c.estimateGasLimit(opts, contract, input, gasPrice, nil, nil, value)
if err != nil {
return nil, err
}
}
// create the transaction
nonce, err := c.getNonce(opts)
if err != nil {
return nil, err
}
baseTx := &types.LegacyTx{
To: contract,
Nonce: nonce,
GasPrice: gasPrice,
Gas: gasLimit,
Value: value,
Data: input,
}
return types.NewTx(baseTx), nil
}

func (c *BoundContract) estimateGasLimit(opts *TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) {
if contract != nil {
// Gas estimation cannot succeed without code for method invocations.
if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil {
return 0, err
} else if len(code) == 0 {
return 0, ErrNoCode
}
}
msg := ethereum.CallMsg{
From: opts.From,
To: contract,
GasPrice: gasPrice,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Value: value,
Data: input,
}
return c.transactor.EstimateGas(ensureContext(opts.Context), msg)
}

func (c *BoundContract) getNonce(opts *TransactOpts) (uint64, error) {
if opts.Nonce == nil {
return c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From)
} else {
return opts.Nonce.Uint64(), nil
}
}

// transact executes an actual transaction invocation, first deriving any missing
// authorization fields, and then scheduling the transaction for execution.
func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
if opts.GasPrice != nil && (opts.GasFeeCap != nil || opts.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
// Create the transaction
var (
rawTx *types.Transaction
err error
)
if opts.GasPrice != nil {
rawTx, err = c.createLegacyTx(opts, contract, input)
} else {
// Only query for basefee if gasPrice not specified
if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil {
return nil, errHead
} else if head.BaseFee != nil {
rawTx, err = c.createDynamicTx(opts, contract, input, head)
} else {
// Chain is not London ready -> use legacy transaction
rawTx, err = c.createLegacyTx(opts, contract, input)
}
}
if err != nil {
return nil, err
}
// Sign the transaction and schedule it for execution
if opts.Signer == nil {
return nil, errors.New("no signer to authorize the transaction with")
}
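Editor's note (not part of the upstream diff): the refactor above means a caller selects the pricing mode purely through TransactOpts. A minimal sketch of the two paths, assuming a generated binding type Token with a Transfer method (placeholder names, illustrative fee values):

// Sketch only: which constructor a transact() call ends up in.
func sendWithFees(opts *bind.TransactOpts, token *Token, to common.Address) (*types.Transaction, error) {
	// Legacy path: a non-nil GasPrice routes through createLegacyTx.
	opts.GasPrice = big.NewInt(2_000_000_000)
	// EIP-1559 path instead: leave GasPrice nil; createDynamicTx then suggests a tip
	// and derives GasFeeCap from the head block's base fee unless both caps are set.
	// opts.GasPrice = nil
	// opts.GasTipCap = big.NewInt(1_000_000_000)
	// opts.GasFeeCap = big.NewInt(30_000_000_000)
	return token.Transfer(opts, to, big.NewInt(1))
}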
@ -406,6 +486,9 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
// UnpackLog unpacks a retrieved log into the provided output structure.
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.abi.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
@ -422,6 +505,9 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log)
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
if log.Topics[0] != c.abi.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
return err

View File

@ -43,6 +43,43 @@ const (
LangObjC
)
func isKeyWord(arg string) bool {
switch arg {
case "break":
case "case":
case "chan":
case "const":
case "continue":
case "default":
case "defer":
case "else":
case "fallthrough":
case "for":
case "func":
case "go":
case "goto":
case "if":
case "import":
case "interface":
case "iota":
case "map":
case "make":
case "new":
case "package":
case "range":
case "return":
case "select":
case "struct":
case "switch":
case "type":
case "var":
default:
return false
}
return true
}
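Editor's note: the guard added in Bind uses isKeyWord so that ABI parameters named after Go keywords (for example "type" or "range") never become invalid Go identifiers. A minimal sketch of the effect, not upstream code:

// Sketch only: how an ABI input name is sanitized before code generation.
func safeArgName(raw string, pos int) string {
	if raw == "" || isKeyWord(raw) {
		return fmt.Sprintf("arg%d", pos) // an input named "type" becomes "arg0"
	}
	return raw
}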
// Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant
// to be used as is in client code, but rather as an intermediate struct which
// enforces compile time type safety and naming convention opposed to having to
@ -88,10 +125,18 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
transactIdentifiers = make(map[string]bool)
eventIdentifiers = make(map[string]bool)
)
for _, input := range evmABI.Constructor.Inputs {
if hasStruct(input.Type) {
bindStructType[lang](input.Type, structs)
}
}
for _, original := range evmABI.Methods {
// Normalize the method for capital cases and non-anonymous inputs/outputs
normalized := original
normalizedName := methodNormalizer[lang](alias(aliases, original.Name))

// Ensure there is no duplicated identifier
var identifiers = callIdentifiers
if !original.IsConstant() {
@ -101,11 +146,12 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName)
}
identifiers[normalizedName] = true

normalized.Name = normalizedName
normalized.Inputs = make([]abi.Argument, len(original.Inputs))
copy(normalized.Inputs, original.Inputs)
for j, input := range normalized.Inputs {
if input.Name == "" || isKeyWord(input.Name) {
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
}
if hasStruct(input.Type) {
@ -145,12 +191,22 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
eventIdentifiers[normalizedName] = true
normalized.Name = normalizedName

used := make(map[string]bool)
normalized.Inputs = make([]abi.Argument, len(original.Inputs))
copy(normalized.Inputs, original.Inputs)
for j, input := range normalized.Inputs {
if input.Name == "" || isKeyWord(input.Name) {
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
}
// Event is a bit special, we need to define event struct in binding,
// ensure there is no camel-case-style name conflict.
for index := 0; ; index++ {
if !used[capitalise(normalized.Inputs[j].Name)] {
used[capitalise(normalized.Inputs[j].Name)] = true
break
}
normalized.Inputs[j].Name = fmt.Sprintf("%s%d", normalized.Inputs[j].Name, index)
}
if hasStruct(input.Type) {
bindStructType[lang](input.Type, structs)
}
@ -172,7 +228,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
contracts[types[i]] = &tmplContract{
Type: capitalise(types[i]),
InputABI: strings.ReplaceAll(strippedABI, "\"", "\\\""),
InputBin: strings.TrimPrefix(strings.TrimSpace(bytecodes[i]), "0x"),
Constructor: evmABI.Constructor,
Calls: calls,
@ -425,15 +481,22 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
if s, exist := structs[id]; exist {
return s.Name
}
var (
names = make(map[string]bool)
fields []*tmplField
)
for i, elem := range kind.TupleElems {
name := capitalise(kind.TupleRawNames[i])
name = abi.ResolveNameConflict(name, func(s string) bool { return names[s] })
names[name] = true
fields = append(fields, &tmplField{Type: bindStructTypeGo(*elem, structs), Name: name, SolKind: *elem})
}
name := kind.TupleRawName
if name == "" {
name = fmt.Sprintf("Struct%d", len(structs))
}
name = capitalise(name)
structs[id] = &tmplStruct{
Name: name,
Fields: fields,

View File

@ -90,6 +90,7 @@ package {{.Package}}
import (
"math/big"
"strings"
"errors"

ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
@ -101,6 +102,7 @@ import (
// Reference imports to suppress errors if they are not otherwise used.
var (
_ = errors.New
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
@ -108,6 +110,7 @@ var (
_ = common.Big1
_ = types.BloomLookup
_ = event.NewSubscription
_ = abi.ConvertType
)

{{$structs := .Structs}}
@ -120,32 +123,48 @@ var (
{{end}}
{{range $contract := .Contracts}}
// {{.Type}}MetaData contains all meta data concerning the {{.Type}} contract.
var {{.Type}}MetaData = &bind.MetaData{
ABI: "{{.InputABI}}",
{{if $contract.FuncSigs -}}
Sigs: map[string]string{
{{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}",
{{end}}
},
{{end -}}
{{if .InputBin -}}
Bin: "0x{{.InputBin}}",
{{end}}
}
// {{.Type}}ABI is the input ABI used to generate the binding from.
// Deprecated: Use {{.Type}}MetaData.ABI instead.
var {{.Type}}ABI = {{.Type}}MetaData.ABI
{{if $contract.FuncSigs}}
// Deprecated: Use {{.Type}}MetaData.Sigs instead.
// {{.Type}}FuncSigs maps the 4-byte function signature to its string representation.
var {{.Type}}FuncSigs = {{.Type}}MetaData.Sigs
{{end}}
{{if .InputBin}}
// {{.Type}}Bin is the compiled bytecode used for deploying new contracts.
// Deprecated: Use {{.Type}}MetaData.Bin instead.
var {{.Type}}Bin = {{.Type}}MetaData.Bin

// Deploy{{.Type}} deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
func Deploy{{.Type}}(auth *bind.TransactOpts, backend bind.ContractBackend {{range .Constructor.Inputs}}, {{.Name}} {{bindtype .Type $structs}}{{end}}) (common.Address, *types.Transaction, *{{.Type}}, error) {
parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil {
return common.Address{}, nil, nil, err
}
if parsed == nil {
return common.Address{}, nil, nil, errors.New("GetABI returned nil")
}
{{range $pattern, $name := .Libraries}}
{{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend)
{{$contract.Type}}Bin = strings.ReplaceAll({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:])
{{end}}
address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}})
if err != nil {
return common.Address{}, nil, nil, err
}
@ -250,11 +269,11 @@ var (
// bind{{.Type}} binds a generic wrapper to an already deployed contract.
func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil {
return nil, err
}
return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
}
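Editor's note: for readers unfamiliar with the MetaData indirection introduced above, this is roughly what the template expands to for a hypothetical Token contract; names, the ABI string and the hex bytecode are illustrative only, not taken from this change:

var TokenMetaData = &bind.MetaData{
	ABI:  "[]", // the real generated string holds the full JSON ABI
	Sigs: map[string]string{"a9059cbb": "transfer(address,uint256)"},
	Bin:  "0x6080",
}

func bindToken(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
	parsed, err := TokenMetaData.GetAbi() // parses the ABI once and reuses it afterwards
	if err != nil {
		return nil, err
	}
	return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
}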
// Call invokes the (constant) contract method with params as input values and

View File

@ -21,6 +21,7 @@ import (
"errors" "errors"
"time" "time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -35,14 +36,16 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
logger := log.New("hash", tx.Hash()) logger := log.New("hash", tx.Hash())
for { for {
receipt, err := b.TransactionReceipt(ctx, tx.Hash()) receipt, err := b.TransactionReceipt(ctx, tx.Hash())
if receipt != nil { if err == nil {
return receipt, nil return receipt, nil
} }
if err != nil {
logger.Trace("Receipt retrieval failed", "err", err) if errors.Is(err, ethereum.NotFound) {
} else {
logger.Trace("Transaction not yet mined") logger.Trace("Transaction not yet mined")
} else {
logger.Trace("Receipt retrieval failed", "err", err)
} }
// Wait for the next round. // Wait for the next round.
select { select {
case <-ctx.Done(): case <-ctx.Done():

View File

@ -17,66 +17,77 @@
package abi

import (
"bytes"
"errors"
"fmt"
"strings"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)

type Error struct {
Name string
Inputs Arguments
str string

// Sig contains the string signature according to the ABI spec.
// e.g. error foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substitute for its canonical representation "int256"
Sig string

// ID returns the canonical representation of the error's signature used by the
// abi definition to identify event names and types.
ID common.Hash
}

func NewError(name string, inputs Arguments) Error {
// sanitize inputs to remove inputs without names
// and precompute string and sig representation.
names := make([]string, len(inputs))
types := make([]string, len(inputs))
for i, input := range inputs {
if input.Name == "" {
inputs[i] = Argument{
Name: fmt.Sprintf("arg%d", i),
Indexed: input.Indexed,
Type: input.Type,
}
} else {
inputs[i] = input
}
// string representation
names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name)
if input.Indexed {
names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name)
}
// sig representation
types[i] = input.Type.String()
}

str := fmt.Sprintf("error %v(%v)", name, strings.Join(names, ", "))
sig := fmt.Sprintf("%v(%v)", name, strings.Join(types, ","))
id := common.BytesToHash(crypto.Keccak256([]byte(sig)))

return Error{
Name: name,
Inputs: inputs,
str: str,
Sig: sig,
ID: id,
}
}

func (e *Error) String() string {
return e.str
}

func (e *Error) Unpack(data []byte) (interface{}, error) {
if len(data) < 4 {
return "", errors.New("invalid data for unpacking")
}
if !bytes.Equal(data[:4], e.ID[:4]) {
return "", errors.New("invalid data for unpacking")
}
return e.Inputs.Unpack(data[4:])
}
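Editor's note: the new Error type gives callers a way to decode custom Solidity errors from revert data. A minimal sketch, assuming a parsed abi.ABI whose JSON declares error InsufficientBalance(uint256 available, uint256 required):

func decodeRevert(contractABI abi.ABI, revertData []byte) (interface{}, error) {
	abiErr, ok := contractABI.Errors["InsufficientBalance"]
	if !ok {
		return nil, errors.New("error not declared in ABI")
	}
	// Unpack verifies the 4-byte selector against abiErr.ID before decoding the arguments.
	return abiErr.Unpack(revertData)
}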

View File

@ -0,0 +1,81 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"errors"
"fmt"
"reflect"
)
var (
errBadBool = errors.New("abi: improperly encoded boolean value")
)
// formatSliceString formats the reflection kind with the given slice size
// and returns a formatted string representation.
func formatSliceString(kind reflect.Kind, sliceSize int) string {
if sliceSize == -1 {
return fmt.Sprintf("[]%v", kind)
}
return fmt.Sprintf("[%d]%v", sliceSize, kind)
}
// sliceTypeCheck checks that the given slice can be assigned to the reflection
// type in t.
func sliceTypeCheck(t Type, val reflect.Value) error {
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type())
}
if t.T == ArrayTy && val.Len() != t.Size {
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
}
if t.Elem.T == SliceTy || t.Elem.T == ArrayTy {
if val.Len() > 0 {
return sliceTypeCheck(*t.Elem, val.Index(0))
}
}
if val.Type().Elem().Kind() != t.Elem.GetType().Kind() {
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type())
}
return nil
}
// typeCheck checks that the given reflection value can be assigned to the reflection
// type in t.
func typeCheck(t Type, value reflect.Value) error {
if t.T == SliceTy || t.T == ArrayTy {
return sliceTypeCheck(t, value)
}
// Check base type validity. Element types will be checked later on.
if t.GetType().Kind() != value.Kind() {
return typeErr(t.GetType().Kind(), value.Kind())
} else if t.T == FixedBytesTy && t.Size != value.Len() {
return typeErr(t.GetType(), value.Type())
} else {
return nil
}
}
// typeErr returns a formatted type casting error.
func typeErr(expected, got interface{}) error {
return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected)
}

View File

@ -29,24 +29,27 @@ import (
// don't get the signature canonical representation as the first LOG topic.
type Event struct {
// Name is the event name used for internal representation. It's derived from
// the raw name and a suffix will be added in the case of event overloading.
//
// e.g.
// These are two events that have the same name:
// * foo(int,int)
// * foo(uint,uint)
// The event name of the first one will be resolved as foo while the second one
// will be resolved as foo0.
Name string

// RawName is the raw event name parsed from ABI.
RawName string
Anonymous bool
Inputs Arguments
str string

// Sig contains the string signature according to the ABI spec.
// e.g. event foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substitute for its canonical representation "int256"
Sig string

// ID returns the canonical representation of the event's signature used by the
// abi definition to identify event names and types.
ID common.Hash

View File

@ -25,15 +25,18 @@ import (
)

// ConvertType converts an interface of a runtime type into a interface of the
// given type, e.g. turn this code:
//
//	var fields []reflect.StructField
//
//	fields = append(fields, reflect.StructField{
//		Name: "X",
//		Type: reflect.TypeOf(new(big.Int)),
//		Tag:  reflect.StructTag("json:\"" + "x" + "\""),
//	}
//
// into:
//
//	type TupleT struct { X *big.Int }
func ConvertType(in interface{}, proto interface{}) interface{} {
protoType := reflect.TypeOf(proto)
@ -99,7 +102,7 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
func set(dst, src reflect.Value) error {
dstType, srcType := dst.Type(), src.Type()
switch {
case dstType.Kind() == reflect.Interface && dst.Elem().IsValid() && (dst.Elem().Type().Kind() == reflect.Ptr || dst.Elem().CanSet()):
return set(dst.Elem(), src)
case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}):
return set(dst.Elem(), src)
@ -123,16 +126,9 @@ func set(dst, src reflect.Value) error {
func setSlice(dst, src reflect.Value) error {
slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
for i := 0; i < src.Len(); i++ {
if err := set(slice.Index(i), src.Index(i)); err != nil {
return err
}
}
if dst.CanSet() {
dst.Set(slice)
@ -177,11 +173,13 @@ func setStruct(dst, src reflect.Value) error {
}

// mapArgNamesToStructFields maps a slice of argument names to struct fields.
//
// first round: for each Exportable field that contains a `abi:""` tag and this field name
// exists in the given argument name list, pair them together.
//
// second round: for each argument name that has not been already linked, find what
// variable is expected to be mapped into, if it exists and has not been used, pair them.
//
// Note this function assumes the given value is a struct value.
func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
typ := value.Type()
@ -227,7 +225,6 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
// second round ~~~
for _, argName := range argNames {
structFieldName := ToCamelCase(argName)
if structFieldName == "" {

View File

@ -0,0 +1,176 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"fmt"
)
type SelectorMarshaling struct {
Name string `json:"name"`
Type string `json:"type"`
Inputs []ArgumentMarshaling `json:"inputs"`
}
func isDigit(c byte) bool {
return c >= '0' && c <= '9'
}
func isAlpha(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
func isIdentifierSymbol(c byte) bool {
return c == '$' || c == '_'
}
func parseToken(unescapedSelector string, isIdent bool) (string, string, error) {
if len(unescapedSelector) == 0 {
return "", "", fmt.Errorf("empty token")
}
firstChar := unescapedSelector[0]
position := 1
if !(isAlpha(firstChar) || (isIdent && isIdentifierSymbol(firstChar))) {
return "", "", fmt.Errorf("invalid token start: %c", firstChar)
}
for position < len(unescapedSelector) {
char := unescapedSelector[position]
if !(isAlpha(char) || isDigit(char) || (isIdent && isIdentifierSymbol(char))) {
break
}
position++
}
return unescapedSelector[:position], unescapedSelector[position:], nil
}
func parseIdentifier(unescapedSelector string) (string, string, error) {
return parseToken(unescapedSelector, true)
}
func parseElementaryType(unescapedSelector string) (string, string, error) {
parsedType, rest, err := parseToken(unescapedSelector, false)
if err != nil {
return "", "", fmt.Errorf("failed to parse elementary type: %v", err)
}
// handle arrays
for len(rest) > 0 && rest[0] == '[' {
parsedType = parsedType + string(rest[0])
rest = rest[1:]
for len(rest) > 0 && isDigit(rest[0]) {
parsedType = parsedType + string(rest[0])
rest = rest[1:]
}
if len(rest) == 0 || rest[0] != ']' {
return "", "", fmt.Errorf("failed to parse array: expected ']', got %c", unescapedSelector[0])
}
parsedType = parsedType + string(rest[0])
rest = rest[1:]
}
return parsedType, rest, nil
}
func parseCompositeType(unescapedSelector string) ([]interface{}, string, error) {
if len(unescapedSelector) == 0 || unescapedSelector[0] != '(' {
return nil, "", fmt.Errorf("expected '(', got %c", unescapedSelector[0])
}
parsedType, rest, err := parseType(unescapedSelector[1:])
if err != nil {
return nil, "", fmt.Errorf("failed to parse type: %v", err)
}
result := []interface{}{parsedType}
for len(rest) > 0 && rest[0] != ')' {
parsedType, rest, err = parseType(rest[1:])
if err != nil {
return nil, "", fmt.Errorf("failed to parse type: %v", err)
}
result = append(result, parsedType)
}
if len(rest) == 0 || rest[0] != ')' {
return nil, "", fmt.Errorf("expected ')', got '%s'", rest)
}
if len(rest) >= 3 && rest[1] == '[' && rest[2] == ']' {
return append(result, "[]"), rest[3:], nil
}
return result, rest[1:], nil
}
func parseType(unescapedSelector string) (interface{}, string, error) {
if len(unescapedSelector) == 0 {
return nil, "", fmt.Errorf("empty type")
}
if unescapedSelector[0] == '(' {
return parseCompositeType(unescapedSelector)
} else {
return parseElementaryType(unescapedSelector)
}
}
func assembleArgs(args []interface{}) ([]ArgumentMarshaling, error) {
arguments := make([]ArgumentMarshaling, 0)
for i, arg := range args {
// generate dummy name to avoid unmarshal issues
name := fmt.Sprintf("name%d", i)
if s, ok := arg.(string); ok {
arguments = append(arguments, ArgumentMarshaling{name, s, s, nil, false})
} else if components, ok := arg.([]interface{}); ok {
subArgs, err := assembleArgs(components)
if err != nil {
return nil, fmt.Errorf("failed to assemble components: %v", err)
}
tupleType := "tuple"
if len(subArgs) != 0 && subArgs[len(subArgs)-1].Type == "[]" {
subArgs = subArgs[:len(subArgs)-1]
tupleType = "tuple[]"
}
arguments = append(arguments, ArgumentMarshaling{name, tupleType, tupleType, subArgs, false})
} else {
return nil, fmt.Errorf("failed to assemble args: unexpected type %T", arg)
}
}
return arguments, nil
}
// ParseSelector converts a method selector into a struct that can be JSON encoded
// and consumed by other functions in this package.
// Note, although uppercase letters are not part of the ABI spec, this function
// still accepts it as the general format is valid.
func ParseSelector(unescapedSelector string) (SelectorMarshaling, error) {
name, rest, err := parseIdentifier(unescapedSelector)
if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
}
args := []interface{}{}
if len(rest) >= 2 && rest[0] == '(' && rest[1] == ')' {
rest = rest[2:]
} else {
args, rest, err = parseCompositeType(rest)
if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
}
}
if len(rest) > 0 {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest)
}
// Reassemble the fake ABI and construct the JSON
fakeArgs, err := assembleArgs(args)
if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err)
}
return SelectorMarshaling{name, "function", fakeArgs}, nil
}
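Editor's note: a short usage sketch for ParseSelector; the selector string is illustrative and the surrounding imports (fmt, log) are assumed:

func describeSelector() {
	sel, err := abi.ParseSelector("transferFrom(address,address,uint256)")
	if err != nil {
		log.Fatal(err)
	}
	// sel.Name == "transferFrom", sel.Type == "function"; sel.Inputs carries the three
	// argument types under the generated dummy names name0..name2.
	fmt.Println(sel.Name, len(sel.Inputs))
}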

View File

@ -23,6 +23,8 @@ import (
"regexp" "regexp"
"strconv" "strconv"
"strings" "strings"
"unicode"
"unicode/utf8"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
@ -161,19 +163,26 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
elems []*Type
names []string
expression string // canonical parameter expression
used = make(map[string]bool)
)
expression += "("
for idx, c := range components {
cType, err := NewType(c.Type, c.InternalType, c.Components)
if err != nil {
return Type{}, err
}
name := ToCamelCase(c.Name)
if name == "" {
return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
}
fieldName := ResolveNameConflict(name, func(s string) bool { return used[s] })
if err != nil {
return Type{}, err
}
used[fieldName] = true
if !isValidFieldName(fieldName) {
return Type{}, fmt.Errorf("field %d has invalid name", idx)
}
fields = append(fields, reflect.StructField{
Name: fieldName, // reflect.StructOf will panic for any exported field.
Type: cType.GetType(),
@ -201,7 +210,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
if internalType != "" && strings.HasPrefix(internalType, structPrefix) { if internalType != "" && strings.HasPrefix(internalType, structPrefix) {
// Foo.Bar type definition is not allowed in golang, // Foo.Bar type definition is not allowed in golang,
// convert the format to FooBar // convert the format to FooBar
typ.TupleRawName = strings.Replace(internalType[len(structPrefix):], ".", "", -1) typ.TupleRawName = strings.ReplaceAll(internalType[len(structPrefix):], ".", "")
} }
case "function": case "function":
@ -250,20 +259,6 @@ func (t Type) GetType() reflect.Type {
}
}
func overloadedArgName(rawName string, names map[string]string) (string, error) {
fieldName := ToCamelCase(rawName)
if fieldName == "" {
return "", errors.New("abi: purely anonymous or underscored field is not supported")
}
// Handle overloaded fieldNames
_, ok := names[fieldName]
for idx := 0; ok; idx++ {
fieldName = fmt.Sprintf("%s%d", ToCamelCase(rawName), idx)
_, ok = names[fieldName]
}
return fieldName, nil
}
// String implements Stringer.
func (t Type) String() (out string) {
return t.stringKind
@ -399,3 +394,30 @@ func getTypeSize(t Type) int {
}
return 32
}
// isLetter reports whether a given 'rune' is classified as a Letter.
// This method is copied from reflect/type.go
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}
// isValidFieldName checks if a string is a valid (struct) field name or not.
//
// According to the language spec, a field name should be an identifier.
//
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
// This method is copied from reflect/type.go
func isValidFieldName(fieldName string) bool {
for i, c := range fieldName {
if i == 0 && !isLetter(c) {
return false
}
if !(isLetter(c) || unicode.IsDigit(c)) {
return false
}
}
return len(fieldName) > 0
}
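Editor's note: the two helpers above gate which tuple component names survive into reflect.StructOf. A minimal sketch of their behaviour, assumed to live inside package abi next to them:

func exampleFieldNames() {
	fmt.Println(isValidFieldName("Amount")) // true
	fmt.Println(isValidFieldName("1st"))    // false: must start with a letter or '_'
	fmt.Println(isValidFieldName(""))       // false: empty names are rejected
}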

View File

@ -115,7 +115,6 @@ func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
reflect.Copy(array, reflect.ValueOf(word[0:t.Size]))
return array.Interface(), nil
}

// forEachUnpack iteratively unpack elements.
@ -163,6 +162,9 @@ func forTupleUnpack(t Type, output []byte) (interface{}, error) {
virtualArgs := 0
for index, elem := range t.TupleElems {
marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
if err != nil {
return nil, err
}
if elem.T == ArrayTy && !isDynamicType(*elem) {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
@ -180,9 +182,6 @@ func forTupleUnpack(t Type, output []byte) (interface{}, error) {
// coded as just like uint256,bool,uint256
virtualArgs += getTypeSize(*elem)/32 - 1
}
if err != nil {
return nil, err
}
retval.Field(index).Set(reflect.ValueOf(marshalledValue))
}
return retval.Interface(), nil
@ -255,7 +254,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
// lengthPrefixPointsTo interprets a 32 byte slice as an offset and then determines which indices to look to decode the type.
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
bigOffsetEnd := new(big.Int).SetBytes(output[index : index+32])
bigOffsetEnd.Add(bigOffsetEnd, common.Big32)
outputLength := big.NewInt(int64(len(output)))
@ -268,11 +267,9 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
}
offsetEnd := int(bigOffsetEnd.Uint64())
lengthBig := new(big.Int).SetBytes(output[offsetEnd-32 : offsetEnd])

totalSize := new(big.Int).Add(bigOffsetEnd, lengthBig)
if totalSize.BitLen() > 63 {
return 0, 0, fmt.Errorf("abi: length larger than int64: %v", totalSize)
}
@ -287,10 +284,10 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
// tuplePointsTo resolves the location reference for dynamic tuple.
func tuplePointsTo(index int, output []byte) (start int, err error) {
offset := new(big.Int).SetBytes(output[index : index+32])
outputLen := big.NewInt(int64(len(output)))
if offset.Cmp(outputLen) > 0 {
return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
}
if offset.BitLen() > 63 {

View File

@ -0,0 +1,40 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import "fmt"
// ResolveNameConflict returns the next available name for a given thing.
// This helper can be used for lots of purposes:
//
// - In solidity function overloading is supported, this function can fix
// the name conflicts of overloaded functions.
// - In golang binding generation, the parameter(in function, event, error,
// and struct definition) name will be converted to camelcase style which
// may eventually lead to name conflicts.
//
// Name conflicts are mostly resolved by adding number suffix. e.g. if the abi contains
// Methods "send" and "send1", ResolveNameConflict would return "send2" for input "send".
func ResolveNameConflict(rawName string, used func(string) bool) string {
name := rawName
ok := used(name)
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
ok = used(name)
}
return name
}
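Editor's note: a short sketch of how a caller deduplicates names with ResolveNameConflict (fmt import assumed):

func dedupeExample() {
	used := map[string]bool{"Send": true}
	name := abi.ResolveNameConflict("Send", func(s string) bool { return used[s] })
	used[name] = true
	fmt.Println(name) // "Send0": the base name was taken, so a numeric suffix is appended
}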

View File

@ -46,7 +46,7 @@ const (
// accounts (derived from the same seed).
type Wallet interface {
// URL retrieves the canonical path under which this wallet is reachable. It is
// used by upper layers to define a sorting order over all wallets from multiple
// backends.
URL() URL
@ -89,7 +89,7 @@ type Wallet interface {
// accounts.
//
// Note, self derivation will increment the last component of the specified path
// opposed to descending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so
@ -105,7 +105,7 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
// a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
@ -124,13 +124,13 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
// a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
// the account in a keystore).
//
// This method should return the signature in 'canonical' format, with v 0 or 1.
SignText(account Account, text []byte) ([]byte, error)

// SignTextWithPassphrase is identical to Signtext, but also takes a password
@ -176,7 +176,8 @@ type Backend interface {
// TextHash is a helper function that calculates a hash for the given message that can be
// safely used to calculate a signature from.
//
// The hash is calculated as
//
//	keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
@ -188,7 +189,8 @@ func TextHash(data []byte) []byte {
// TextAndHash is a helper function that calculates a hash for the given message that can be
// safely used to calculate a signature from.
//
// The hash is calculated as
//
//	keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
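Editor's note: spelled out for a concrete message, the prefix scheme described above looks like this (sketch only, values illustrative):

func exampleTextHash() []byte {
	msg := []byte("hello")
	// keccak256("\x19Ethereum Signed Message:\n" + "5" + "hello")
	return accounts.TextHash(msg)
}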

View File

@ -41,8 +41,7 @@ var ErrInvalidPassphrase = errors.New("invalid password")
// second time.
var ErrWalletAlreadyOpen = errors.New("wallet already open")

// ErrWalletClosed is returned if a wallet is offline.
var ErrWalletClosed = errors.New("wallet closed")

// AuthNeededError is returned by backends for signing requests where the user

View File

@ -29,7 +29,7 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/signer/core" "github.com/ethereum/go-ethereum/signer/core/apitypes"
) )
type ExternalBackend struct { type ExternalBackend struct {
@ -152,10 +152,6 @@ func (api *ExternalSigner) SelfDerive(bases []accounts.DerivationPath, chain eth
log.Error("operation SelfDerive not supported on external signers") log.Error("operation SelfDerive not supported on external signers")
} }
func (api *ExternalSigner) signHash(account accounts.Account, hash []byte) ([]byte, error) {
return []byte{}, fmt.Errorf("operation not supported on external signers")
}
// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed
func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
var res hexutil.Bytes
@ -196,6 +192,10 @@ type signTransactionResult struct {
Tx *types.Transaction `json:"tx"`
}
// SignTx sends the transaction to the external signer.
// If chainID is nil, or tx.ChainID is zero, the chain ID will be assigned
// by the external signer. For non-legacy transactions, the chain ID of the
// transaction overrides the chainID parameter.
func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
data := hexutil.Bytes(tx.Data())
var to *common.MixedcaseAddress
@ -203,7 +203,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
t := common.NewMixedcaseAddress(*tx.To())
to = &t
}
args := &apitypes.SendTxArgs{
Data: &data,
Nonce: hexutil.Uint64(tx.Nonce()),
Value: hexutil.Big(*tx.Value()),
@ -211,21 +211,24 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
To: to,
From: common.NewMixedcaseAddress(account.Address),
}
switch tx.Type() {
case types.LegacyTxType, types.AccessListTxType:
args.GasPrice = (*hexutil.Big)(tx.GasPrice())
case types.DynamicFeeTxType:
args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap())
args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap())
default:
return nil, fmt.Errorf("unsupported tx type %d", tx.Type())
}
// We should request the default chain id that we're operating with
// (the chain we're executing on)
if chainID != nil && chainID.Sign() != 0 {
args.ChainID = (*hexutil.Big)(chainID)
}
if tx.Type() != types.LegacyTxType {
// However, if the user asked for a particular chain id, then we should
// use that instead.
if tx.ChainId().Sign() != 0 {
args.ChainID = (*hexutil.Big)(tx.ChainId())
}
accessList := tx.AccessList()

View File

@ -41,7 +41,7 @@ var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60,
var LegacyLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}

// DerivationPath represents the computer friendly version of a hierarchical
// deterministic wallet account derivation path.
//
// The BIP-32 spec https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
// defines derivation paths to be of the form:

View File

@ -146,6 +146,14 @@ func (ac *accountCache) deleteByFile(path string) {
}
}
// watcherStarted returns true if the watcher loop started running (even if it
// has since also ended).
func (ac *accountCache) watcherStarted() bool {
ac.mu.Lock()
defer ac.mu.Unlock()
return ac.watcher.running || ac.watcher.runEnded
}
func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
for i := range slice {
if slice[i] == elem {

View File

@ -17,7 +17,6 @@
package keystore

import (
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -40,8 +39,8 @@ type fileCache struct {
func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) {
t0 := time.Now()

// List all the files from the keystore folder
files, err := os.ReadDir(keyDir)
if err != nil {
return nil, nil, nil, err
}
@ -62,10 +61,14 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
log.Trace("Ignoring file on account scan", "path", path) log.Trace("Ignoring file on account scan", "path", path)
continue continue
} }
// Gather the set of all and fresly modified files // Gather the set of all and freshly modified files
all.Add(path) all.Add(path)
modified := fi.ModTime() info, err := fi.Info()
if err != nil {
return nil, nil, nil, err
}
modified := info.ModTime()
if modified.After(fc.lastMod) { if modified.After(fc.lastMod) {
mods.Add(path) mods.Add(path)
} }
@ -89,13 +92,13 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
}

// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
func nonKeyFile(fi os.DirEntry) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true
}
// Skip misc special files, directories (yes, symlinks too).
if fi.IsDir() || !fi.Type().IsRegular() {
return true
}
return false

View File

@ -23,7 +23,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -244,7 +243,7 @@ func writeTemporaryKeyFile(file string, content []byte) (string, error) {
}
// Atomic write: create a temporary hidden file first
// then move it into place. TempFile assigns mode 0600.
f, err := os.CreateTemp(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
if err != nil {
return "", err
}

View File

@ -572,6 +572,14 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account
return a, nil
}
// isUpdating returns whether the event notification loop is running.
// This method is mainly meant for tests.
func (ks *KeyStore) isUpdating() bool {
ks.mu.RLock()
defer ks.mu.RUnlock()
return ks.updating
}
// zeroKey zeroes a private key in memory.
func zeroKey(k *ecdsa.PrivateKey) {
if k == nil {

View File

@ -34,7 +34,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
@ -83,7 +82,7 @@ type keyStorePassphrase struct {
func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) { func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
// Load the key from the keystore and decrypt its contents // Load the key from the keystore and decrypt its contents
keyjson, err := ioutil.ReadFile(filename) keyjson, err := os.ReadFile(filename)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -140,7 +139,6 @@ func (ks keyStorePassphrase) JoinPath(filename string) string {
// Encryptdata encrypts the data given as 'data' with the password 'auth'. // Encryptdata encrypts the data given as 'data' with the password 'auth'.
func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) { func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
salt := make([]byte, 32) salt := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, salt); err != nil { if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("reading from crypto/rand failed: " + err.Error()) panic("reading from crypto/rand failed: " + err.Error())
@ -465,7 +463,6 @@ func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
r := ensureInt(cryptoJSON.KDFParams["r"]) r := ensureInt(cryptoJSON.KDFParams["r"])
p := ensureInt(cryptoJSON.KDFParams["p"]) p := ensureInt(cryptoJSON.KDFParams["p"])
return scrypt.Key(authArray, salt, n, r, p, dkLen) return scrypt.Key(authArray, salt, n, r, p, dkLen)
} else if cryptoJSON.KDF == "pbkdf2" { } else if cryptoJSON.KDF == "pbkdf2" {
c := ensureInt(cryptoJSON.KDFParams["c"]) c := ensureInt(cryptoJSON.KDFParams["c"])
prf := cryptoJSON.KDFParams["prf"].(string) prf := cryptoJSON.KDFParams["prf"].(string)

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build (darwin && !ios && cgo) || freebsd || (linux && !arm64) || netbsd || solaris
// +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris // +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris
package keystore package keystore
@ -27,8 +28,9 @@ import (
type watcher struct { type watcher struct {
ac *accountCache ac *accountCache
starting bool running bool // set to true when runloop begins
running bool runEnded bool // set to true when runloop ends
starting bool // set to true prior to runloop starting
ev chan notify.EventInfo ev chan notify.EventInfo
quit chan struct{} quit chan struct{}
} }
@ -41,6 +43,9 @@ func newWatcher(ac *accountCache) *watcher {
} }
} }
// enabled returns false on systems not supported.
func (*watcher) enabled() bool { return true }
// starts the watcher loop in the background. // starts the watcher loop in the background.
// Start a watcher in the background if that's not already in progress. // Start a watcher in the background if that's not already in progress.
// The caller must hold w.ac.mu. // The caller must hold w.ac.mu.
@ -61,6 +66,7 @@ func (w *watcher) loop() {
w.ac.mu.Lock() w.ac.mu.Lock()
w.running = false w.running = false
w.starting = false w.starting = false
w.runEnded = true
w.ac.mu.Unlock() w.ac.mu.Unlock()
}() }()
logger := log.New("path", w.ac.keydir) logger := log.New("path", w.ac.keydir)

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build (darwin && !cgo) || ios || (linux && arm64) || windows || (!darwin && !freebsd && !linux && !netbsd && !solaris)
// +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris // +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris
// This is the fallback implementation of directory watching. // This is the fallback implementation of directory watching.
@ -21,8 +22,14 @@
package keystore package keystore
type watcher struct{ running bool } type watcher struct {
running bool
runEnded bool
}
func newWatcher(*accountCache) *watcher { return new(watcher) } func newWatcher(*accountCache) *watcher { return new(watcher) }
func (*watcher) start() {} func (*watcher) start() {}
func (*watcher) close() {} func (*watcher) close() {}
// enabled returns false on systems not supported.
func (*watcher) enabled() bool { return false }
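Both watcher files also gain the Go 1.17 //go:build constraint while keeping the legacy // +build line for older toolchains. A generic illustration of the paired syntax (hypothetical file, not part of this diff):

//go:build linux && amd64
// +build linux,amd64

// Package example is compiled only on linux/amd64; gofmt keeps the two
// constraint lines in sync once the //go:build form is present.
package example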

View File

@ -25,6 +25,10 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
) )
// managerSubBufferSize determines how many incoming wallet events
// the manager will buffer in its channel.
const managerSubBufferSize = 50
// Config contains the settings of the global account manager. // Config contains the settings of the global account manager.
// //
// TODO(rjl493456442, karalabe, holiman): Get rid of this when account management // TODO(rjl493456442, karalabe, holiman): Get rid of this when account management
@ -33,6 +37,13 @@ type Config struct {
InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed
} }
// newBackendEvent lets the manager know it should
// track the given backend for wallet updates.
type newBackendEvent struct {
backend Backend
processed chan struct{} // Informs event emitter that backend has been integrated
}
// Manager is an overarching account manager that can communicate with various // Manager is an overarching account manager that can communicate with various
// backends for signing transactions. // backends for signing transactions.
type Manager struct { type Manager struct {
@ -40,11 +51,13 @@ type Manager struct {
backends map[reflect.Type][]Backend // Index of backends currently registered backends map[reflect.Type][]Backend // Index of backends currently registered
updaters []event.Subscription // Wallet update subscriptions for all backends updaters []event.Subscription // Wallet update subscriptions for all backends
updates chan WalletEvent // Subscription sink for backend wallet changes updates chan WalletEvent // Subscription sink for backend wallet changes
newBackends chan newBackendEvent // Incoming backends to be tracked by the manager
wallets []Wallet // Cache of all wallets from all registered backends wallets []Wallet // Cache of all wallets from all registered backends
feed event.Feed // Wallet feed notifying of arrivals/departures feed event.Feed // Wallet feed notifying of arrivals/departures
quit chan chan error quit chan chan error
term chan struct{} // Channel is closed upon termination of the update loop
lock sync.RWMutex lock sync.RWMutex
} }
@ -57,7 +70,7 @@ func NewManager(config *Config, backends ...Backend) *Manager {
wallets = merge(wallets, backend.Wallets()...) wallets = merge(wallets, backend.Wallets()...)
} }
// Subscribe to wallet notifications from all backends // Subscribe to wallet notifications from all backends
updates := make(chan WalletEvent, 4*len(backends)) updates := make(chan WalletEvent, managerSubBufferSize)
subs := make([]event.Subscription, len(backends)) subs := make([]event.Subscription, len(backends))
for i, backend := range backends { for i, backend := range backends {
@ -69,8 +82,10 @@ func NewManager(config *Config, backends ...Backend) *Manager {
backends: make(map[reflect.Type][]Backend), backends: make(map[reflect.Type][]Backend),
updaters: subs, updaters: subs,
updates: updates, updates: updates,
newBackends: make(chan newBackendEvent),
wallets: wallets, wallets: wallets,
quit: make(chan chan error), quit: make(chan chan error),
term: make(chan struct{}),
} }
for _, backend := range backends { for _, backend := range backends {
kind := reflect.TypeOf(backend) kind := reflect.TypeOf(backend)
@ -93,6 +108,14 @@ func (am *Manager) Config() *Config {
return am.config return am.config
} }
// AddBackend starts the tracking of an additional backend for wallet updates.
// cmd/geth assumes once this func returns the backends have been already integrated.
func (am *Manager) AddBackend(backend Backend) {
done := make(chan struct{})
am.newBackends <- newBackendEvent{backend, done}
<-done
}
// update is the wallet event loop listening for notifications from the backends // update is the wallet event loop listening for notifications from the backends
// and updating the cache of wallets. // and updating the cache of wallets.
func (am *Manager) update() { func (am *Manager) update() {
@ -122,10 +145,22 @@ func (am *Manager) update() {
// Notify any listeners of the event // Notify any listeners of the event
am.feed.Send(event) am.feed.Send(event)
case event := <-am.newBackends:
am.lock.Lock()
// Update caches
backend := event.backend
am.wallets = merge(am.wallets, backend.Wallets()...)
am.updaters = append(am.updaters, backend.Subscribe(am.updates))
kind := reflect.TypeOf(backend)
am.backends[kind] = append(am.backends[kind], backend)
am.lock.Unlock()
close(event.processed)
case errc := <-am.quit: case errc := <-am.quit:
// Manager terminating, return // Manager terminating, return
errc <- nil errc <- nil
// Signals event emitters the loop is not receiving values
// to prevent them from getting stuck.
close(am.term)
return return
} }
} }
@ -133,6 +168,9 @@ func (am *Manager) update() {
// Backends retrieves the backend(s) with the given type from the account manager. // Backends retrieves the backend(s) with the given type from the account manager.
func (am *Manager) Backends(kind reflect.Type) []Backend { func (am *Manager) Backends(kind reflect.Type) []Backend {
am.lock.RLock()
defer am.lock.RUnlock()
return am.backends[kind] return am.backends[kind]
} }
@ -219,7 +257,7 @@ func merge(slice []Wallet, wallets ...Wallet) []Wallet {
return slice return slice
} }
// drop is the couterpart of merge, which looks up wallets from within the sorted // drop is the counterpart of merge, which looks up wallets from within the sorted
// cache and removes the ones specified. // cache and removes the ones specified.
func drop(slice []Wallet, wallets ...Wallet) []Wallet { func drop(slice []Wallet, wallets ...Wallet) []Wallet {
for _, wallet := range wallets { for _, wallet := range wallets {

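AddBackend hands the new backend to the update loop over the newBackends channel and blocks on the processed channel until the loop has merged the backend's wallets, while the larger managerSubBufferSize and the new term channel keep late event emitters from blocking a terminating manager. A minimal sketch of the same hand-off pattern, with hypothetical names standing in for Manager and Backend:

package main

import "fmt"

// trackRequest mirrors newBackendEvent: the caller sends a value plus a
// "processed" channel and blocks until the event loop has integrated it.
type trackRequest struct {
	name      string
	processed chan struct{}
}

func main() {
	requests := make(chan trackRequest)
	quit := make(chan struct{})

	// Event loop: integrate incoming requests, then acknowledge them.
	go func() {
		for {
			select {
			case req := <-requests:
				fmt.Println("integrated backend:", req.name)
				close(req.processed) // unblocks the sender, as Manager.AddBackend expects
			case <-quit:
				return
			}
		}
	}()

	// Caller side, equivalent in shape to Manager.AddBackend.
	done := make(chan struct{})
	requests <- trackRequest{name: "ledger-hub", processed: done}
	<-done
	close(quit)
}
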
View File

@ -34,7 +34,7 @@ package scwallet
import ( import (
"encoding/json" "encoding/json"
"io/ioutil" "io"
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
@ -96,7 +96,7 @@ func (hub *Hub) readPairings() error {
return err return err
} }
pairingData, err := ioutil.ReadAll(pairingFile) pairingData, err := io.ReadAll(pairingFile)
if err != nil { if err != nil {
return err return err
} }

View File

@ -178,7 +178,7 @@ func (s *SecureChannelSession) mutuallyAuthenticate() error {
return err return err
} }
if response.Sw1 != 0x90 || response.Sw2 != 0x00 { if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2) return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: %#x%x", response.Sw1, response.Sw2)
} }
if len(response.Data) != scSecretLength { if len(response.Data) != scSecretLength {
@ -261,7 +261,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
rapdu.deserialize(plainData) rapdu.deserialize(plainData)
if rapdu.Sw1 != sw1Ok { if rapdu.Sw1 != sw1Ok {
return nil, fmt.Errorf("unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2) return nil, fmt.Errorf("unexpected response status Cla=%#x, Ins=%#x, Sw=%#x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
} }
return rapdu, nil return rapdu, nil

View File

@ -167,7 +167,7 @@ func transmit(card *pcsc.Card, command *commandAPDU) (*responseAPDU, error) {
} }
if response.Sw1 != sw1Ok { if response.Sw1 != sw1Ok {
return nil, fmt.Errorf("unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2) return nil, fmt.Errorf("unexpected insecure response status Cla=%#x, Ins=%#x, Sw=%#x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
} }
return response, nil return response, nil
@ -638,7 +638,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
// accounts. // accounts.
// //
// Note, self derivation will increment the last component of the specified path // Note, self derivation will increment the last component of the specified path
// opposed to decending into a child path to allow discovering accounts starting // opposed to descending into a child path to allow discovering accounts starting
// from non zero components. // from non zero components.
// //
// Some hardware wallets switched derivation paths through their evolution, so // Some hardware wallets switched derivation paths through their evolution, so
@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
} }
// derivationPath fetches the wallet's current derivation path from the card. // derivationPath fetches the wallet's current derivation path from the card.
//
//lint:ignore U1000 needs to be added to the console interface //lint:ignore U1000 needs to be added to the console interface
func (s *Session) derivationPath() (accounts.DerivationPath, error) { func (s *Session) derivationPath() (accounts.DerivationPath, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil) response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
@ -994,6 +995,7 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
} }
// keyExport contains information on an exported keypair. // keyExport contains information on an exported keypair.
//
//lint:ignore U1000 needs to be added to the console interface //lint:ignore U1000 needs to be added to the console interface
type keyExport struct { type keyExport struct {
PublicKey []byte `asn1:"tag:0"` PublicKey []byte `asn1:"tag:0"`
@ -1001,6 +1003,7 @@ type keyExport struct {
} }
// publicKey returns the public key for the current derivation path. // publicKey returns the public key for the current derivation path.
//
//lint:ignore U1000 needs to be added to the console interface //lint:ignore U1000 needs to be added to the console interface
func (s *Session) publicKey() ([]byte, error) { func (s *Session) publicKey() ([]byte, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil) response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)

View File

@ -95,7 +95,6 @@ func (u *URL) UnmarshalJSON(input []byte) error {
// -1 if x < y // -1 if x < y
// 0 if x == y // 0 if x == y
// +1 if x > y // +1 if x > y
//
func (u URL) Cmp(url URL) int { func (u URL) Cmp(url URL) int {
if u.Scheme == url.Scheme { if u.Scheme == url.Scheme {
return strings.Compare(u.Path, url.Path) return strings.Compare(u.Path, url.Path)

View File

@ -71,18 +71,28 @@ type Hub struct {
// NewLedgerHub creates a new hardware wallet manager for Ledger devices. // NewLedgerHub creates a new hardware wallet manager for Ledger devices.
func NewLedgerHub() (*Hub, error) { func NewLedgerHub() (*Hub, error) {
return newHub(LedgerScheme, 0x2c97, []uint16{ return newHub(LedgerScheme, 0x2c97, []uint16{
// Device definitions taken from
// https://github.com/LedgerHQ/ledger-live/blob/38012bc8899e0f07149ea9cfe7e64b2c146bc92b/libs/ledgerjs/packages/devices/src/index.ts
// Original product IDs // Original product IDs
0x0000, /* Ledger Blue */ 0x0000, /* Ledger Blue */
0x0001, /* Ledger Nano S */ 0x0001, /* Ledger Nano S */
0x0004, /* Ledger Nano X */ 0x0004, /* Ledger Nano X */
0x0005, /* Ledger Nano S Plus */
0x0006, /* Ledger Nano FTS */
// Upcoming product IDs: https://www.ledger.com/2019/05/17/windows-10-update-sunsetting-u2f-tunnel-transport-for-ledger-devices/
0x0015, /* HID + U2F + WebUSB Ledger Blue */ 0x0015, /* HID + U2F + WebUSB Ledger Blue */
0x1015, /* HID + U2F + WebUSB Ledger Nano S */ 0x1015, /* HID + U2F + WebUSB Ledger Nano S */
0x4015, /* HID + U2F + WebUSB Ledger Nano X */ 0x4015, /* HID + U2F + WebUSB Ledger Nano X */
0x5015, /* HID + U2F + WebUSB Ledger Nano S Plus */
0x6015, /* HID + U2F + WebUSB Ledger Nano FTS */
0x0011, /* HID + WebUSB Ledger Blue */ 0x0011, /* HID + WebUSB Ledger Blue */
0x1011, /* HID + WebUSB Ledger Nano S */ 0x1011, /* HID + WebUSB Ledger Nano S */
0x4011, /* HID + WebUSB Ledger Nano X */ 0x4011, /* HID + WebUSB Ledger Nano X */
0x5011, /* HID + WebUSB Ledger Nano S Plus */
0x6011, /* HID + WebUSB Ledger Nano FTS */
}, 0xffa0, 0, newLedgerDriver) }, 0xffa0, 0, newLedgerDriver)
} }

View File

@ -407,8 +407,6 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
// domain hash | 32 bytes // domain hash | 32 bytes
// message hash | 32 bytes // message hash | 32 bytes
// //
//
//
// And the output data is: // And the output data is:
// //
// Description | Length // Description | Length

View File

@ -84,14 +84,14 @@ func (w *trezorDriver) Status() (string, error) {
// Open implements usbwallet.driver, attempting to initialize the connection to // Open implements usbwallet.driver, attempting to initialize the connection to
// the Trezor hardware wallet. Initializing the Trezor is a two or three phase operation: // the Trezor hardware wallet. Initializing the Trezor is a two or three phase operation:
// * The first phase is to initialize the connection and read the wallet's // - The first phase is to initialize the connection and read the wallet's
// features. This phase is invoked if the provided passphrase is empty. The // features. This phase is invoked if the provided passphrase is empty. The
// device will display the pinpad as a result and will return an appropriate // device will display the pinpad as a result and will return an appropriate
// error to notify the user that a second open phase is needed. // error to notify the user that a second open phase is needed.
// * The second phase is to unlock access to the Trezor, which is done by the // - The second phase is to unlock access to the Trezor, which is done by the
// user actually providing a passphrase mapping a keyboard keypad to the pin // user actually providing a passphrase mapping a keyboard keypad to the pin
// number of the user (shuffled according to the pinpad displayed). // number of the user (shuffled according to the pinpad displayed).
// * If needed the device will ask for passphrase which will require calling // - If needed the device will ask for passphrase which will require calling
// open again with the actual passphrase (3rd phase) // open again with the actual passphrase (3rd phase)
func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error { func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error {
w.device, w.failure = device, nil w.device, w.failure = device, nil
@ -196,10 +196,10 @@ func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, er
if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil { if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil {
return common.Address{}, err return common.Address{}, err
} }
if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary fomats if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary formats
return common.BytesToAddress(addr), nil return common.BytesToAddress(addr), nil
} }
if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal fomats if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal formats
return common.HexToAddress(addr), nil return common.HexToAddress(addr), nil
} }
return common.Address{}, errors.New("missing derived address") return common.Address{}, errors.New("missing derived address")

View File

@ -380,7 +380,7 @@ func (w *wallet) selfDerive() {
// of legacy-ledger, the first account on the legacy-path will // of legacy-ledger, the first account on the legacy-path will
// be shown to the user, even if we don't actively track it // be shown to the user, even if we don't actively track it
if i < len(nextAddrs)-1 { if i < len(nextAddrs)-1 {
w.log.Info("Skipping trakcking first account on legacy path, use personal.deriveAccount(<url>,<path>, false) to track", w.log.Info("Skipping tracking first account on legacy path, use personal.deriveAccount(<url>,<path>, false) to track",
"path", path, "address", nextAddrs[i]) "path", path, "address", nextAddrs[i])
break break
} }
@ -496,7 +496,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
// accounts. // accounts.
// //
// Note, self derivation will increment the last component of the specified path // Note, self derivation will increment the last component of the specified path
// opposed to decending into a child path to allow discovering accounts starting // opposed to descending into a child path to allow discovering accounts starting
// from non zero components. // from non zero components.
// //
// Some hardware wallets switched derivation paths through their evolution, so // Some hardware wallets switched derivation paths through their evolution, so
@ -526,7 +526,6 @@ func (w *wallet) signHash(account accounts.Account, hash []byte) ([]byte, error)
// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed // SignData signs keccak256(data). The mimetype parameter describes the type of data being signed
func (w *wallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { func (w *wallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
// Unless we are doing 712 signing, simply dispatch to signHash // Unless we are doing 712 signing, simply dispatch to signHash
if !(mimeType == accounts.MimetypeTypedData && len(data) == 66 && data[0] == 0x19 && data[1] == 0x01) { if !(mimeType == accounts.MimetypeTypedData && len(data) == 66 && data[0] == 0x19 && data[1] == 0x01) {
return w.signHash(account, crypto.Keccak256(data)) return w.signHash(account, crypto.Keccak256(data))

View File

@ -1,29 +1,57 @@
os: Visual Studio 2019
clone_depth: 5 clone_depth: 5
version: "{branch}.{build}" version: "{branch}.{build}"
image:
- Ubuntu
- Visual Studio 2019
environment: environment:
matrix: matrix:
- GETH_ARCH: amd64
GETH_MINGW: 'C:\msys64\mingw64'
- GETH_ARCH: 386
GETH_MINGW: 'C:\msys64\mingw32'
install:
- git submodule update --init --depth 1 --recursive
- go version
for:
# Linux has its own script without -arch and -cc.
# The linux builder also runs lint.
- matrix:
only:
- image: Ubuntu
build_script:
- go run build/ci.go lint
- go run build/ci.go install -dlgo
test_script:
- go run build/ci.go test -dlgo -coverage
# linux/386 is disabled.
- matrix:
exclude:
- image: Ubuntu
GETH_ARCH: 386
# Windows builds for amd64 + 386.
- matrix:
only:
- image: Visual Studio 2019
environment:
# We use gcc from MSYS2 because it is the most recent compiler version available on # We use gcc from MSYS2 because it is the most recent compiler version available on
# AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is
# contained in PATH. # contained in PATH.
- GETH_ARCH: amd64 GETH_CC: '%GETH_MINGW%\bin\gcc.exe'
GETH_CC: C:\msys64\mingw64\bin\gcc.exe PATH: '%GETH_MINGW%\bin;C:\Program Files (x86)\NSIS\;%PATH%'
PATH: C:\msys64\mingw64\bin;C:\Program Files (x86)\NSIS\;%PATH%
- GETH_ARCH: 386
GETH_CC: C:\msys64\mingw32\bin\gcc.exe
PATH: C:\msys64\mingw32\bin;C:\Program Files (x86)\NSIS\;%PATH%
install:
- git submodule update --init --depth 1
- go version
- "%GETH_CC% --version"
build_script: build_script:
- go run build\ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC% - 'echo %GETH_ARCH%'
- 'echo %GETH_CC%'
- '%GETH_CC% --version'
- go run build/ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
after_build: after_build:
- go run build\ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds # Upload builds. Note that ci.go makes this a no-op PR builds.
- go run build\ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
test_script: test_script:
- go run build\ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage

View File

@ -19,6 +19,9 @@ package common
import ( import (
"encoding/hex" "encoding/hex"
"errors"
"github.com/ethereum/go-ethereum/common/hexutil"
) )
// FromHex returns the bytes represented by the hexadecimal string s. // FromHex returns the bytes represented by the hexadecimal string s.
@ -92,6 +95,15 @@ func Hex2BytesFixed(str string, flen int) []byte {
return hh return hh
} }
// ParseHexOrString tries to hexdecode b, but if the prefix is missing, it instead just returns the raw bytes
func ParseHexOrString(str string) ([]byte, error) {
b, err := hexutil.Decode(str)
if errors.Is(err, hexutil.ErrMissingPrefix) {
return []byte(str), nil
}
return b, err
}
// RightPadBytes zero-pads slice to the right up to length l. // RightPadBytes zero-pads slice to the right up to length l.
func RightPadBytes(slice []byte, l int) []byte { func RightPadBytes(slice []byte, l int) []byte {
if l <= len(slice) { if l <= len(slice) {

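ParseHexOrString only falls back to the raw bytes when hexutil reports a missing 0x prefix, so malformed hex that does carry the prefix still surfaces an error. A small usage sketch (import path assumed to be the upstream common package):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// With the 0x prefix the input is hex-decoded...
	b, _ := common.ParseHexOrString("0x636c69717565")
	fmt.Printf("%s\n", b) // clique

	// ...without it the raw bytes come back unchanged.
	b, _ = common.ParseHexOrString("clique")
	fmt.Printf("%s\n", b) // clique
}
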
View File

@ -27,12 +27,12 @@ import (
// the unnecessary precision off from the formatted textual representation. // the unnecessary precision off from the formatted textual representation.
type PrettyDuration time.Duration type PrettyDuration time.Duration
var prettyDurationRe = regexp.MustCompile(`\.[0-9]+`) var prettyDurationRe = regexp.MustCompile(`\.[0-9]{4,}`)
// String implements the Stringer interface, allowing pretty printing of duration // String implements the Stringer interface, allowing pretty printing of duration
// values rounded to three decimals. // values rounded to three decimals.
func (d PrettyDuration) String() string { func (d PrettyDuration) String() string {
label := fmt.Sprintf("%v", time.Duration(d)) label := time.Duration(d).String()
if match := prettyDurationRe.FindString(label); len(match) > 4 { if match := prettyDurationRe.FindString(label); len(match) > 4 {
label = strings.Replace(label, match, match[:4], 1) label = strings.Replace(label, match, match[:4], 1)
} }

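The tightened regexp only matches fractional parts of four or more digits, so short labels are left untouched while long ones are trimmed to three decimals. For instance (package path assumed to be go-ethereum's common):

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	d := common.PrettyDuration(1234567890 * time.Nanosecond)
	fmt.Println(d) // 1.234s: "1.23456789s" with the fraction cut to three decimals
}
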
View File

@ -18,7 +18,7 @@
Package hexutil implements hex encoding with 0x prefix. Package hexutil implements hex encoding with 0x prefix.
This encoding is used by the Ethereum RPC API to transport binary data in JSON payloads. This encoding is used by the Ethereum RPC API to transport binary data in JSON payloads.
Encoding Rules # Encoding Rules
All hex data must have prefix "0x". All hex data must have prefix "0x".
@ -176,13 +176,14 @@ func MustDecodeBig(input string) *big.Int {
} }
// EncodeBig encodes bigint as a hex string with 0x prefix. // EncodeBig encodes bigint as a hex string with 0x prefix.
// The sign of the integer is ignored.
func EncodeBig(bigint *big.Int) string { func EncodeBig(bigint *big.Int) string {
nbits := bigint.BitLen() if sign := bigint.Sign(); sign == 0 {
if nbits == 0 {
return "0x0" return "0x0"
} else if sign > 0 {
return "0x" + bigint.Text(16)
} else {
return "-0x" + bigint.Text(16)[1:]
} }
return fmt.Sprintf("%#x", bigint)
} }
func has0xPrefix(input string) bool { func has0xPrefix(input string) bool {

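EncodeBig now handles the sign explicitly and drops the comment claiming it is ignored: zero encodes as 0x0, positive values as 0x-prefixed hex, and negative values keep a leading minus. For example:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	fmt.Println(hexutil.EncodeBig(big.NewInt(0)))    // 0x0
	fmt.Println(hexutil.EncodeBig(big.NewInt(255)))  // 0xff
	fmt.Println(hexutil.EncodeBig(big.NewInt(-255))) // -0xff
}
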
View File

@ -58,7 +58,7 @@ func (s *Simulated) Run(d time.Duration) {
s.mu.Lock() s.mu.Lock()
s.init() s.init()
end := s.now + AbsTime(d) end := s.now.Add(d)
var do []func() var do []func()
for len(s.scheduled) > 0 && s.scheduled[0].at <= end { for len(s.scheduled) > 0 && s.scheduled[0].at <= end {
ev := heap.Pop(&s.scheduled).(*simTimer) ev := heap.Pop(&s.scheduled).(*simTimer)
@ -134,7 +134,7 @@ func (s *Simulated) AfterFunc(d time.Duration, fn func()) Timer {
func (s *Simulated) schedule(d time.Duration, fn func()) *simTimer { func (s *Simulated) schedule(d time.Duration, fn func()) *simTimer {
s.init() s.init()
at := s.now + AbsTime(d) at := s.now.Add(d)
ev := &simTimer{do: fn, at: at, s: s} ev := &simTimer{do: fn, at: at, s: s}
heap.Push(&s.scheduled, ev) heap.Push(&s.scheduled, ev)
s.cond.Broadcast() s.cond.Broadcast()

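AbsTime arithmetic now goes through the Add method instead of raw integer addition when the simulated clock schedules timers. A minimal sketch of driving the simulated clock (the zero value is assumed usable, per the init() calls above):

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	var c mclock.Simulated
	fired := false
	c.AfterFunc(5*time.Second, func() { fired = true })

	c.Run(time.Second) // advance simulated time by 1s: the timer is not due yet
	fmt.Println(fired) // false

	c.Run(10 * time.Second) // advance well past the 5s deadline
	fmt.Println(fired)      // true
}
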
View File

@ -29,6 +29,7 @@ import (
// - priority evaluates the actual priority of an item // - priority evaluates the actual priority of an item
// - maxPriority gives an upper estimate for the priority in any moment between // - maxPriority gives an upper estimate for the priority in any moment between
// now and the given absolute time // now and the given absolute time
//
// If the upper estimate is exceeded then Update should be called for that item. // If the upper estimate is exceeded then Update should be called for that item.
// A global Refresh function should also be called periodically. // A global Refresh function should also be called periodically.
type LazyQueue struct { type LazyQueue struct {
@ -87,13 +88,13 @@ func (q *LazyQueue) Refresh() {
// refresh re-evaluates items in the older queue and swaps the two queues // refresh re-evaluates items in the older queue and swaps the two queues
func (q *LazyQueue) refresh(now mclock.AbsTime) { func (q *LazyQueue) refresh(now mclock.AbsTime) {
q.maxUntil = now + mclock.AbsTime(q.period) q.maxUntil = now.Add(q.period)
for q.queue[0].Len() != 0 { for q.queue[0].Len() != 0 {
q.Push(heap.Pop(q.queue[0]).(*item).value) q.Push(heap.Pop(q.queue[0]).(*item).value)
} }
q.queue[0], q.queue[1] = q.queue[1], q.queue[0] q.queue[0], q.queue[1] = q.queue[1], q.queue[0]
q.indexOffset = 1 - q.indexOffset q.indexOffset = 1 - q.indexOffset
q.maxUntil += mclock.AbsTime(q.period) q.maxUntil = q.maxUntil.Add(q.period)
} }
// Push adds an item to the queue // Push adds an item to the queue
@ -163,7 +164,7 @@ func (q *LazyQueue) PopItem() interface{} {
return i return i
} }
// Remove removes removes the item with the given index. // Remove removes the item with the given index.
func (q *LazyQueue) Remove(index int) interface{} { func (q *LazyQueue) Remove(index int) interface{} {
if index < 0 { if index < 0 {
return nil return nil

View File

@ -41,13 +41,13 @@ func (p *Prque) Push(data interface{}, priority int64) {
heap.Push(p.cont, &item{data, priority}) heap.Push(p.cont, &item{data, priority})
} }
// Peek returns the value with the greates priority but does not pop it off. // Peek returns the value with the greatest priority but does not pop it off.
func (p *Prque) Peek() (interface{}, int64) { func (p *Prque) Peek() (interface{}, int64) {
item := p.cont.blocks[0][0] item := p.cont.blocks[0][0]
return item.value, item.priority return item.value, item.priority
} }
// Pops the value with the greates priority off the stack and returns it. // Pops the value with the greatest priority off the stack and returns it.
// Currently no shrinking is done. // Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, int64) { func (p *Prque) Pop() (interface{}, int64) {
item := heap.Pop(p.cont).(*item) item := heap.Pop(p.cont).(*item)

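Peek returns the greatest-priority entry without removing it, while Pop also removes it. A short usage sketch (constructor signature assumed from the upstream common/prque package):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/prque"
)

func main() {
	q := prque.New(nil) // nil: no index callback needed here
	q.Push("low", 1)
	q.Push("high", 10)

	v, prio := q.Peek() // the highest-priority item stays queued
	fmt.Println(v, prio) // high 10

	v, prio = q.Pop() // now it is removed
	fmt.Println(v, prio) // high 10
}
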
View File

@ -19,12 +19,12 @@ package common
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "os"
) )
// LoadJSON reads the given file and unmarshals its content. // LoadJSON reads the given file and unmarshals its content.
func LoadJSON(file string, val interface{}) error { func LoadJSON(file string, val interface{}) error {
content, err := ioutil.ReadFile(file) content, err := os.ReadFile(file)
if err != nil { if err != nil {
return err return err
} }

View File

@ -86,7 +86,7 @@ func (h Hash) String() string {
} }
// Format implements fmt.Formatter. // Format implements fmt.Formatter.
// Hash supports the %v, %s, %v, %x, %X and %d format verbs. // Hash supports the %v, %s, %q, %x, %X and %d format verbs.
func (h Hash) Format(s fmt.State, c rune) { func (h Hash) Format(s fmt.State, c rune) {
hexb := make([]byte, 2+len(h)*2) hexb := make([]byte, 2+len(h)*2)
copy(hexb, "0x") copy(hexb, "0x")
@ -270,7 +270,7 @@ func (a Address) hex() []byte {
} }
// Format implements fmt.Formatter. // Format implements fmt.Formatter.
// Address supports the %v, %s, %v, %x, %X and %d format verbs. // Address supports the %v, %s, %q, %x, %X and %d format verbs.
func (a Address) Format(s fmt.State, c rune) { func (a Address) Format(s fmt.State, c rune) {
switch c { switch c {
case 'v', 's': case 'v', 's':

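The corrected comments list %q instead of repeating %v; the formatter prints the 0x-prefixed hex for %v and %s, a quoted variant for %q, and bare hex for %x unless the # flag is set. For instance (exact output is indicative, based on a reading of Hash.Format):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	h := common.HexToHash("0xabcd")
	fmt.Printf("%s\n", h) // full 32-byte hex with the 0x prefix
	fmt.Printf("%q\n", h) // the same string wrapped in quotes
	fmt.Printf("%x\n", h) // hex without the 0x prefix
}
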
View File

@ -17,11 +17,14 @@
package clique package clique
import ( import (
"encoding/json"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
@ -175,3 +178,58 @@ func (api *API) Status() (*status, error) {
NumBlocks: numBlocks, NumBlocks: numBlocks,
}, nil }, nil
} }
type blockNumberOrHashOrRLP struct {
*rpc.BlockNumberOrHash
RLP hexutil.Bytes `json:"rlp,omitempty"`
}
func (sb *blockNumberOrHashOrRLP) UnmarshalJSON(data []byte) error {
bnOrHash := new(rpc.BlockNumberOrHash)
// Try to unmarshal bNrOrHash
if err := bnOrHash.UnmarshalJSON(data); err == nil {
sb.BlockNumberOrHash = bnOrHash
return nil
}
// Try to unmarshal RLP
var input string
if err := json.Unmarshal(data, &input); err != nil {
return err
}
blob, err := hexutil.Decode(input)
if err != nil {
return err
}
sb.RLP = blob
return nil
}
// GetSigner returns the signer for a specific clique block.
// Can be called with either a blocknumber, blockhash or an rlp encoded blob.
// The RLP encoded blob can either be a block or a header.
func (api *API) GetSigner(rlpOrBlockNr *blockNumberOrHashOrRLP) (common.Address, error) {
if len(rlpOrBlockNr.RLP) == 0 {
blockNrOrHash := rlpOrBlockNr.BlockNumberOrHash
var header *types.Header
if blockNrOrHash == nil {
header = api.chain.CurrentHeader()
} else if hash, ok := blockNrOrHash.Hash(); ok {
header = api.chain.GetHeaderByHash(hash)
} else if number, ok := blockNrOrHash.Number(); ok {
header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
}
if header == nil {
return common.Address{}, fmt.Errorf("missing block %v", blockNrOrHash.String())
}
return api.clique.Author(header)
}
block := new(types.Block)
if err := rlp.DecodeBytes(rlpOrBlockNr.RLP, block); err == nil {
return api.clique.Author(block.Header())
}
header := new(types.Header)
if err := rlp.DecodeBytes(rlpOrBlockNr.RLP, header); err != nil {
return common.Address{}, err
}
return api.clique.Author(header)
}
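Because blockNumberOrHashOrRLP first tries the regular block-number-or-hash decoding and only falls back to an RLP blob, the new endpoint accepts "latest", a number, a hash, or a hex-encoded header. A hedged sketch of calling it over HTTP RPC (the clique_getSigner method name and the local endpoint are assumptions):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // assumes a clique node with HTTP RPC enabled
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var signer common.Address
	// The single parameter may be "latest", a block number, a block hash or RLP-encoded header bytes.
	if err := client.Call(&signer, "clique_getSigner", "latest"); err != nil {
		panic(err)
	}
	fmt.Println("sealed by", signer.Hex())
}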

View File

@ -180,7 +180,7 @@ type Clique struct {
signer common.Address // Ethereum address of the signing key signer common.Address // Ethereum address of the signing key
signFn SignerFn // Signer function to authorize hashes with signFn SignerFn // Signer function to authorize hashes with
lock sync.RWMutex // Protects the signer fields lock sync.RWMutex // Protects the signer and proposals fields
// The fields below are for testing only // The fields below are for testing only
fakeDiff bool // Skip difficulty verifications fakeDiff bool // Skip difficulty verifications
@ -295,9 +295,8 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
} }
} }
// Verify that the gas limit is <= 2^63-1 // Verify that the gas limit is <= 2^63-1
cap := uint64(0x7fffffffffffffff) if header.GasLimit > params.MaxGasLimit {
if header.GasLimit > cap { return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
} }
// If all checks passed, validate any special fields for hard forks // If all checks passed, validate any special fields for hard forks
if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil { if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
@ -363,7 +362,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header
} }
} }
// All basic checks passed, verify the seal and return // All basic checks passed, verify the seal and return
return c.verifySeal(chain, header, parents) return c.verifySeal(snap, header, parents)
} }
// snapshot retrieves the authorization snapshot at a given point in time. // snapshot retrieves the authorization snapshot at a given point in time.
@ -460,18 +459,12 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e
// consensus protocol requirements. The method accepts an optional list of parent // consensus protocol requirements. The method accepts an optional list of parent
// headers that aren't yet part of the local blockchain to generate the snapshots // headers that aren't yet part of the local blockchain to generate the snapshots
// from. // from.
func (c *Clique) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error { func (c *Clique) verifySeal(snap *Snapshot, header *types.Header, parents []*types.Header) error {
// Verifying the genesis block is not supported // Verifying the genesis block is not supported
number := header.Number.Uint64() number := header.Number.Uint64()
if number == 0 { if number == 0 {
return errUnknownBlock return errUnknownBlock
} }
// Retrieve the snapshot needed to verify this header and cache it
snap, err := c.snapshot(chain, number-1, header.ParentHash, parents)
if err != nil {
return err
}
// Resolve the authorization key and check against signers // Resolve the authorization key and check against signers
signer, err := ecrecover(header, c.signatures) signer, err := ecrecover(header, c.signatures)
if err != nil { if err != nil {
@ -514,9 +507,8 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
if err != nil { if err != nil {
return err return err
} }
if number%c.config.Epoch != 0 {
c.lock.RLock() c.lock.RLock()
if number%c.config.Epoch != 0 {
// Gather all the proposals that make sense voting on // Gather all the proposals that make sense voting on
addresses := make([]common.Address, 0, len(c.proposals)) addresses := make([]common.Address, 0, len(c.proposals))
for address, authorize := range c.proposals { for address, authorize := range c.proposals {
@ -533,10 +525,14 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
copy(header.Nonce[:], nonceDropVote) copy(header.Nonce[:], nonceDropVote)
} }
} }
c.lock.RUnlock()
} }
// Copy signer protected by mutex to avoid race condition
signer := c.signer
c.lock.RUnlock()
// Set the correct difficulty // Set the correct difficulty
header.Difficulty = calcDifficulty(snap, c.signer) header.Difficulty = calcDifficulty(snap, signer)
// Ensure the extra data has all its components // Ensure the extra data has all its components
if len(header.Extra) < extraVanity { if len(header.Extra) < extraVanity {
@ -606,8 +602,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
} }
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing) // For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 { if c.config.Period == 0 && len(block.Transactions()) == 0 {
log.Info("Sealing paused, waiting for transactions") return errors.New("sealing paused while waiting for transactions")
return nil
} }
// Don't hold the signer fields for the entire sealing procedure // Don't hold the signer fields for the entire sealing procedure
c.lock.RLock() c.lock.RLock()
@ -627,8 +622,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
if recent == signer { if recent == signer {
// Signer is among recents, only wait if the current block doesn't shift it out // Signer is among recents, only wait if the current block doesn't shift it out
if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit { if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
log.Info("Signed recently, must wait for others") return errors.New("signed recently, must wait for others")
return nil
} }
} }
} }
@ -675,7 +669,10 @@ func (c *Clique) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64,
if err != nil { if err != nil {
return nil return nil
} }
return calcDifficulty(snap, c.signer) c.lock.RLock()
signer := c.signer
c.lock.RUnlock()
return calcDifficulty(snap, signer)
} }
func calcDifficulty(snap *Snapshot, signer common.Address) *big.Int { func calcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
@ -700,9 +697,7 @@ func (c *Clique) Close() error {
func (c *Clique) APIs(chain consensus.ChainHeaderReader) []rpc.API { func (c *Clique) APIs(chain consensus.ChainHeaderReader) []rpc.API {
return []rpc.API{{ return []rpc.API{{
Namespace: "clique", Namespace: "clique",
Version: "1.0",
Service: &API{chain: chain, clique: c}, Service: &API{chain: chain, clique: c},
Public: false,
}} }}
} }
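Prepare and CalcDifficulty now copy c.signer while holding the read lock and release it before doing further work, and Seal reports the "signed recently" and "sealing paused" cases as errors rather than silently returning. A minimal sketch of the copy-under-lock pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type sealer struct {
	mu     sync.RWMutex
	signer string // stands in for the clique signer address
}

func (s *sealer) authorize(addr string) {
	s.mu.Lock()
	s.signer = addr
	s.mu.Unlock()
}

func (s *sealer) describe(block uint64) string {
	s.mu.RLock()
	signer := s.signer // copy while protected, as Clique.Prepare now does
	s.mu.RUnlock()

	// The copy stays valid even if authorize changes s.signer concurrently.
	return fmt.Sprintf("block %d sealed by %s", block, signer)
}

func main() {
	var s sealer
	s.authorize("0xsigner")
	fmt.Println(s.describe(42))
}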

View File

@ -23,6 +23,7 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -87,7 +88,7 @@ func newSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, number uin
// loadSnapshot loads an existing snapshot from the database. // loadSnapshot loads an existing snapshot from the database.
func loadSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) { func loadSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
blob, err := db.Get(append([]byte("clique-"), hash[:]...)) blob, err := db.Get(append(rawdb.CliqueSnapshotPrefix, hash[:]...))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -107,7 +108,7 @@ func (s *Snapshot) store(db ethdb.Database) error {
if err != nil { if err != nil {
return err return err
} }
return db.Put(append([]byte("clique-"), s.Hash[:]...), blob) return db.Put(append(rawdb.CliqueSnapshotPrefix, s.Hash[:]...), blob)
} }
// copy creates a deep copy of the snapshot, though not the individual votes. // copy creates a deep copy of the snapshot, though not the individual votes.

View File

@ -44,6 +44,9 @@ type ChainHeaderReader interface {
// GetHeaderByHash retrieves a block header from the database by its hash. // GetHeaderByHash retrieves a block header from the database by its hash.
GetHeaderByHash(hash common.Hash) *types.Header GetHeaderByHash(hash common.Hash) *types.Header
// GetTd retrieves the total difficulty from the database by hash and number.
GetTd(hash common.Hash, number uint64) *big.Int
} }
// ChainReader defines a small collection of methods needed to access the local // ChainReader defines a small collection of methods needed to access the local

View File

@ -34,4 +34,8 @@ var (
// ErrInvalidNumber is returned if a block's number doesn't equal its parent's // ErrInvalidNumber is returned if a block's number doesn't equal its parent's
// plus one. // plus one.
ErrInvalidNumber = errors.New("invalid block number") ErrInvalidNumber = errors.New("invalid block number")
// ErrInvalidTerminalBlock is returned if a block is invalid wrt. the terminal
// total difficulty.
ErrInvalidTerminalBlock = errors.New("invalid terminal block")
) )

View File

@ -34,6 +34,7 @@ type API struct {
// GetWork returns a work package for external miner. // GetWork returns a work package for external miner.
// //
// The work package consists of 3 strings: // The work package consists of 3 strings:
//
// result[0] - 32 bytes hex encoded current block header pow-hash // result[0] - 32 bytes hex encoded current block header pow-hash
// result[1] - 32 bytes hex encoded seed hash used for DAG // result[1] - 32 bytes hex encoded seed hash used for DAG
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty

View File

@ -45,6 +45,16 @@ var (
maxUncles = 2 // Maximum number of uncles allowed in a single block maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks
// calcDifficultyEip5133 is the difficulty adjustment algorithm as specified by EIP 5133.
// It offsets the bomb a total of 11.4M blocks.
// Specification EIP-5133: https://eips.ethereum.org/EIPS/eip-5133
calcDifficultyEip5133 = makeDifficultyCalculator(big.NewInt(11_400_000))
// calcDifficultyEip4345 is the difficulty adjustment algorithm as specified by EIP 4345.
// It offsets the bomb a total of 10.7M blocks.
// Specification EIP-4345: https://eips.ethereum.org/EIPS/eip-4345
calcDifficultyEip4345 = makeDifficultyCalculator(big.NewInt(10_700_000))
// calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554. // calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554.
// It offsets the bomb a total of 9.7M blocks. // It offsets the bomb a total of 9.7M blocks.
// Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554 // Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554
@ -215,7 +225,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
ancestors[parent] = ancestorHeader ancestors[parent] = ancestorHeader
// If the ancestor doesn't have any uncles, we don't have to iterate them // If the ancestor doesn't have any uncles, we don't have to iterate them
if ancestorHeader.UncleHash != types.EmptyUncleHash { if ancestorHeader.UncleHash != types.EmptyUncleHash {
// Need to add those uncles to the blacklist too // Need to add those uncles to the banned list too
ancestor := chain.GetBlock(parent, number) ancestor := chain.GetBlock(parent, number)
if ancestor == nil { if ancestor == nil {
break break
@ -276,9 +286,8 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected) return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
} }
// Verify that the gas limit is <= 2^63-1 // Verify that the gas limit is <= 2^63-1
cap := uint64(0x7fffffffffffffff) if header.GasLimit > params.MaxGasLimit {
if header.GasLimit > cap { return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
} }
// Verify that the gasUsed is <= gasLimit // Verify that the gasUsed is <= gasLimit
if header.GasUsed > header.GasLimit { if header.GasUsed > header.GasLimit {
@ -330,8 +339,10 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uin
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int { func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, big1) next := new(big.Int).Add(parent.Number, big1)
switch { switch {
case config.IsCatalyst(next): case config.IsGrayGlacier(next):
return big.NewInt(1) return calcDifficultyEip5133(time, parent)
case config.IsArrowGlacier(next):
return calcDifficultyEip4345(time, parent)
case config.IsLondon(next): case config.IsLondon(next):
return calcDifficultyEip3554(time, parent) return calcDifficultyEip3554(time, parent)
case config.IsMuirGlacier(next): case config.IsMuirGlacier(next):
@ -503,8 +514,8 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
} }
// Exported for fuzzing // Exported for fuzzing
var FrontierDifficultyCalulator = calcDifficultyFrontier var FrontierDifficultyCalculator = calcDifficultyFrontier
var HomesteadDifficultyCalulator = calcDifficultyHomestead var HomesteadDifficultyCalculator = calcDifficultyHomestead
var DynamicDifficultyCalculator = makeDifficultyCalculator var DynamicDifficultyCalculator = makeDifficultyCalculator
// verifySeal checks whether a block satisfies the PoW difficulty requirements, // verifySeal checks whether a block satisfies the PoW difficulty requirements,
@ -639,10 +650,6 @@ var (
// reward. The total reward consists of the static block reward and rewards for // reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded. // included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
// Skip block reward in catalyst mode
if config.IsCatalyst(header.Number) {
return
}
// Select the correct block reward based on chain progression // Select the correct block reward based on chain progression
blockReward := FrontierBlockReward blockReward := FrontierBlockReward
if config.IsByzantium(header.Number) { if config.IsByzantium(header.Number) {

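The difficulty switch now routes Gray Glacier and Arrow Glacier blocks to the two new bomb-delay calculators in place of the removed Catalyst shortcut. A sketch of exercising the exported CalcDifficulty (the parent values, and the mainnet Gray Glacier activation at block 15,050,000, are assumptions for illustration):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	parent := &types.Header{
		Number:     big.NewInt(15_049_999), // one block before the assumed Gray Glacier activation
		Time:       1_655_000_000,
		Difficulty: big.NewInt(14_000_000_000_000_000),
	}
	// With the mainnet config the next block picks calcDifficultyEip5133 (the 11.4M-block delay).
	d := ethash.CalcDifficulty(params.MainnetChainConfig, parent.Time+13, parent)
	fmt.Println(d)
}
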
View File

@ -136,13 +136,16 @@ func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(bu
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil { if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil {
dump.Close()
os.Remove(temp)
return nil, nil, nil, err return nil, nil, nil, err
} }
// Memory map the file for writing and fill it with the generator // Memory map the file for writing and fill it with the generator
mem, buffer, err := memoryMapFile(dump, true) mem, buffer, err := memoryMapFile(dump, true)
if err != nil { if err != nil {
dump.Close() dump.Close()
os.Remove(temp)
return nil, nil, nil, err return nil, nil, nil, err
} }
copy(buffer, dumpMagic) copy(buffer, dumpMagic)
@ -275,8 +278,11 @@ func (c *cache) generate(dir string, limit int, lock bool, test bool) {
// Iterate over all previous instances and delete old ones // Iterate over all previous instances and delete old ones
for ep := int(c.epoch) - limit; ep >= 0; ep-- { for ep := int(c.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1) seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian)) path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s*", algorithmRevision, seed[:8], endian))
os.Remove(path) files, _ := filepath.Glob(path) // find also the temp files that are generated.
for _, file := range files {
os.Remove(file)
}
} }
}) })
} }
@ -358,7 +364,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
if err != nil { if err != nil {
logger.Error("Failed to generate mapped ethash dataset", "err", err) logger.Error("Failed to generate mapped ethash dataset", "err", err)
d.dataset = make([]uint32, dsize/2) d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, cache) generateDataset(d.dataset, d.epoch, cache)
} }
// Iterate over all previous instances and delete old ones // Iterate over all previous instances and delete old ones
@ -546,6 +552,11 @@ func NewShared() *Ethash {
// Close closes the exit channel to notify all backend threads exiting. // Close closes the exit channel to notify all backend threads exiting.
func (ethash *Ethash) Close() error { func (ethash *Ethash) Close() error {
return ethash.StopRemoteSealer()
}
// StopRemoteSealer stops the remote sealer
func (ethash *Ethash) StopRemoteSealer() error {
ethash.closeOnce.Do(func() { ethash.closeOnce.Do(func() {
// Short circuit if the exit channel is not allocated. // Short circuit if the exit channel is not allocated.
if ethash.remote == nil { if ethash.remote == nil {
@ -670,15 +681,11 @@ func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API {
return []rpc.API{ return []rpc.API{
{ {
Namespace: "eth", Namespace: "eth",
Version: "1.0",
Service: &API{ethash}, Service: &API{ethash},
Public: true,
}, },
{ {
Namespace: "ethash", Namespace: "ethash",
Version: "1.0",
Service: &API{ethash}, Service: &API{ethash},
Public: true,
}, },
} }
} }

View File

@ -14,13 +14,22 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package eth //go:build linux
// +build linux
package ethash
import ( import (
"time" "os"
"github.com/ethereum/go-ethereum/p2p/tracker" "golang.org/x/sys/unix"
) )
// requestTracker is a singleton tracker for eth/66 and newer request times. // ensureSize expands the file to the given size. This is to prevent runtime
var requestTracker = tracker.New(ProtocolName, 5*time.Minute) // errors later on, if the underlying file expands beyond the disk capacity,
// even though it ostensibly is already expanded, but due to being sparse
// does not actually occupy the full declared size on disk.
func ensureSize(f *os.File, size int64) error {
// Docs: https://www.man7.org/linux/man-pages/man2/fallocate.2.html
return unix.Fallocate(int(f.Fd()), 0, 0, size)
}

View File

@ -1,4 +1,4 @@
// Copyright 2020 The go-ethereum Authors // Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify
@ -14,19 +14,23 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package snap //go:build !linux
// +build !linux
package ethash
import ( import (
"github.com/ethereum/go-ethereum/rlp" "os"
) )
// enrEntry is the ENR entry which advertises `snap` protocol on the discovery. // ensureSize expands the file to the given size. This is to prevent runtime
type enrEntry struct { // errors later on, if the underlying file expands beyond the disk capacity,
// Ignore additional fields (for forward compatibility). // even though it ostensibly is already expanded, but due to being sparse
Rest []rlp.RawValue `rlp:"tail"` // does not actually occupy the full declared size on disk.
} func ensureSize(f *os.File, size int64) error {
// On systems which do not support fallocate, we merely truncate it.
// ENRKey implements enr.Entry. // More robust alternatives would be to
func (e enrEntry) ENRKey() string { // - Use posix_fallocate, or
return "snap" // - explicitly fill the file with zeroes.
return f.Truncate(size)
} }

View File

@ -142,6 +142,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
var ( var (
attempts = int64(0) attempts = int64(0)
nonce = seed nonce = seed
powBuffer = new(big.Int)
) )
logger := ethash.config.Log.New("miner", id) logger := ethash.config.Log.New("miner", id)
logger.Trace("Started ethash search for new nonces", "seed", seed) logger.Trace("Started ethash search for new nonces", "seed", seed)
@ -163,7 +164,7 @@ search:
} }
// Compute the PoW value of this nonce // Compute the PoW value of this nonce
digest, result := hashimotoFull(dataset.dataset, hash, nonce) digest, result := hashimotoFull(dataset.dataset, hash, nonce)
if new(big.Int).SetBytes(result).Cmp(target) <= 0 { if powBuffer.SetBytes(result).Cmp(target) <= 0 {
// Correct nonce found, create a new header with it // Correct nonce found, create a new header with it
header = types.CopyHeader(header) header = types.CopyHeader(header)
header.Nonce = types.EncodeNonce(nonce) header.Nonce = types.EncodeNonce(nonce)
@ -338,6 +339,7 @@ func (s *remoteSealer) loop() {
// makeWork creates a work package for external miner. // makeWork creates a work package for external miner.
// //
// The work package consists of 3 strings: // The work package consists of 3 strings:
//
// result[0], 32 bytes hex encoded current block header pow-hash // result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG // result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty // result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
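The mining hunk above reuses a single big.Int (powBuffer) instead of allocating a fresh one for every nonce attempt inside the sealing loop. A small standalone sketch of that allocation-saving pattern, with a toy SHA-256 target check standing in for hashimotoFull, which is internal to ethash.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"math/big"
)

func main() {
	// Toy difficulty target: accept hashes below 2^240.
	target := new(big.Int).Lsh(big.NewInt(1), 240)

	var (
		nonce     uint64
		powBuffer = new(big.Int) // reused across iterations, as in the hunk above
		buf       [8]byte
	)
	for {
		binary.BigEndian.PutUint64(buf[:], nonce)
		result := sha256.Sum256(buf[:])
		// SetBytes overwrites powBuffer in place, so the hot loop does not
		// allocate a new big.Int for every candidate nonce.
		if powBuffer.SetBytes(result[:]).Cmp(target) <= 0 {
			fmt.Printf("found nonce %d with hash %x\n", nonce, result)
			return
		}
		nonce++
	}
}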

View File

@ -0,0 +1,110 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package consensus
import (
"fmt"
"sync"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
// transitionStatus describes the status of the eth1/2 transition. This switch
// between modes is a one-way action which is triggered by the corresponding
// consensus-layer message.
type transitionStatus struct {
LeftPoW bool // The flag is set when the first NewHead message is received
EnteredPoS bool // The flag is set when the first FinalisedBlock message is received
}
// Merger is an internal helper structure used to track the eth1/2 transition status.
// It's a common structure that can be used by both the full node and the light client.
type Merger struct {
db ethdb.KeyValueStore
status transitionStatus
mu sync.RWMutex
}
// NewMerger creates a new Merger which stores its transition status in the provided db.
func NewMerger(db ethdb.KeyValueStore) *Merger {
var status transitionStatus
blob := rawdb.ReadTransitionStatus(db)
if len(blob) != 0 {
if err := rlp.DecodeBytes(blob, &status); err != nil {
log.Crit("Failed to decode the transition status", "err", err)
}
}
return &Merger{
db: db,
status: status,
}
}
// ReachTTD is called whenever the first NewHead message is received
// from the consensus layer.
func (m *Merger) ReachTTD() {
m.mu.Lock()
defer m.mu.Unlock()
if m.status.LeftPoW {
return
}
m.status = transitionStatus{LeftPoW: true}
blob, err := rlp.EncodeToBytes(m.status)
if err != nil {
panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
}
rawdb.WriteTransitionStatus(m.db, blob)
log.Info("Left PoW stage")
}
// FinalizePoS is called whenever the first FinalisedBlock message is received
// from the consensus layer.
func (m *Merger) FinalizePoS() {
m.mu.Lock()
defer m.mu.Unlock()
if m.status.EnteredPoS {
return
}
m.status = transitionStatus{LeftPoW: true, EnteredPoS: true}
blob, err := rlp.EncodeToBytes(m.status)
if err != nil {
panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
}
rawdb.WriteTransitionStatus(m.db, blob)
log.Info("Entered PoS stage")
}
// TDDReached reports whether the chain has left the PoW stage.
func (m *Merger) TDDReached() bool {
m.mu.RLock()
defer m.mu.RUnlock()
return m.status.LeftPoW
}
// PoSFinalized reports whether the chain has entered the PoS stage.
func (m *Merger) PoSFinalized() bool {
m.mu.RLock()
defer m.mu.RUnlock()
return m.status.EnteredPoS
}
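The new Merger above persists the transition flags through rawdb, so a restarted node remembers that it has already left PoW or entered PoS. A hedged usage sketch against an in-memory database, assuming the consensus and rawdb import paths match the vendored tree in this commit.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	merger := consensus.NewMerger(db)
	fmt.Println("left PoW:", merger.TDDReached())      // false on a fresh db
	fmt.Println("entered PoS:", merger.PoSFinalized()) // false on a fresh db

	// First NewHead from the consensus layer: mark the PoW exit.
	merger.ReachTTD()
	// First FinalisedBlock from the consensus layer: mark PoS entry.
	merger.FinalizePoS()

	// A second Merger on the same database sees the persisted status,
	// because the flags were RLP-encoded and written via rawdb.
	reloaded := consensus.NewMerger(db)
	fmt.Println("left PoW:", reloaded.TDDReached())      // true
	fmt.Println("entered PoS:", reloaded.PoSFinalized()) // true
}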

View File

@ -40,9 +40,10 @@ var (
// ensure it conforms to DAO hard-fork rules. // ensure it conforms to DAO hard-fork rules.
// //
// DAO hard-fork extension to the header validity: // DAO hard-fork extension to the header validity:
// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range //
// with the fork specific extra-data set // - if the node is no-fork, do not accept blocks in the [fork, fork+10) range
// b) if the node is pro-fork, require blocks in the specific range to have the // with the fork specific extra-data set.
// - if the node is pro-fork, require blocks in the specific range to have the
// unique extra-data set. // unique extra-data set.
func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error { func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error {
// Short circuit validation if the node doesn't care about the DAO fork // Short circuit validation if the node doesn't care about the DAO fork

View File

@ -46,7 +46,7 @@ func VerifyEip1559Header(config *params.ChainConfig, parent, header *types.Heade
expectedBaseFee := CalcBaseFee(config, parent) expectedBaseFee := CalcBaseFee(config, parent)
if header.BaseFee.Cmp(expectedBaseFee) != 0 { if header.BaseFee.Cmp(expectedBaseFee) != 0 {
return fmt.Errorf("invalid baseFee: have %s, want %s, parentBaseFee %s, parentGasUsed %d", return fmt.Errorf("invalid baseFee: have %s, want %s, parentBaseFee %s, parentGasUsed %d",
expectedBaseFee, header.BaseFee, parent.BaseFee, parent.GasUsed) header.BaseFee, expectedBaseFee, parent.BaseFee, parent.GasUsed)
} }
return nil return nil
} }
@ -58,36 +58,36 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int {
return new(big.Int).SetUint64(params.InitialBaseFee) return new(big.Int).SetUint64(params.InitialBaseFee)
} }
var ( parentGasTarget := parent.GasLimit / params.ElasticityMultiplier
parentGasTarget = parent.GasLimit / params.ElasticityMultiplier
parentGasTargetBig = new(big.Int).SetUint64(parentGasTarget)
baseFeeChangeDenominator = new(big.Int).SetUint64(params.BaseFeeChangeDenominator)
)
// If the parent gasUsed is the same as the target, the baseFee remains unchanged. // If the parent gasUsed is the same as the target, the baseFee remains unchanged.
if parent.GasUsed == parentGasTarget { if parent.GasUsed == parentGasTarget {
return new(big.Int).Set(parent.BaseFee) return new(big.Int).Set(parent.BaseFee)
} }
var (
num = new(big.Int)
denom = new(big.Int)
)
if parent.GasUsed > parentGasTarget { if parent.GasUsed > parentGasTarget {
// If the parent block used more gas than its target, the baseFee should increase. // If the parent block used more gas than its target, the baseFee should increase.
gasUsedDelta := new(big.Int).SetUint64(parent.GasUsed - parentGasTarget) // max(1, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
x := new(big.Int).Mul(parent.BaseFee, gasUsedDelta) num.SetUint64(parent.GasUsed - parentGasTarget)
y := x.Div(x, parentGasTargetBig) num.Mul(num, parent.BaseFee)
baseFeeDelta := math.BigMax( num.Div(num, denom.SetUint64(parentGasTarget))
x.Div(y, baseFeeChangeDenominator), num.Div(num, denom.SetUint64(params.BaseFeeChangeDenominator))
common.Big1, baseFeeDelta := math.BigMax(num, common.Big1)
)
return x.Add(parent.BaseFee, baseFeeDelta) return num.Add(parent.BaseFee, baseFeeDelta)
} else { } else {
// Otherwise if the parent block used less gas than its target, the baseFee should decrease. // Otherwise if the parent block used less gas than its target, the baseFee should decrease.
gasUsedDelta := new(big.Int).SetUint64(parentGasTarget - parent.GasUsed) // max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
x := new(big.Int).Mul(parent.BaseFee, gasUsedDelta) num.SetUint64(parentGasTarget - parent.GasUsed)
y := x.Div(x, parentGasTargetBig) num.Mul(num, parent.BaseFee)
baseFeeDelta := x.Div(y, baseFeeChangeDenominator) num.Div(num, denom.SetUint64(parentGasTarget))
num.Div(num, denom.SetUint64(params.BaseFeeChangeDenominator))
baseFee := num.Sub(parent.BaseFee, num)
return math.BigMax( return math.BigMax(baseFee, common.Big0)
x.Sub(parent.BaseFee, baseFeeDelta),
common.Big0,
)
} }
} }
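The refactored CalcBaseFee above computes max(1, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator) with two reusable big.Int buffers instead of per-step allocations. A standalone worked example of the same EIP-1559 adjustment, with the mainnet constants (elasticity 2, denominator 8) inlined as assumptions rather than read from params.

package main

import (
	"fmt"
	"math/big"
)

// nextBaseFee reproduces the post-London adjustment shown above, with the
// protocol constants hard-coded for illustration.
func nextBaseFee(parentBaseFee *big.Int, parentGasLimit, parentGasUsed uint64) *big.Int {
	const (
		elasticityMultiplier     = 2
		baseFeeChangeDenominator = 8
	)
	parentGasTarget := parentGasLimit / elasticityMultiplier
	if parentGasUsed == parentGasTarget {
		return new(big.Int).Set(parentBaseFee)
	}
	num := new(big.Int)
	if parentGasUsed > parentGasTarget {
		num.SetUint64(parentGasUsed - parentGasTarget)
		num.Mul(num, parentBaseFee)
		num.Div(num, new(big.Int).SetUint64(parentGasTarget))
		num.Div(num, big.NewInt(baseFeeChangeDenominator))
		if num.Sign() == 0 {
			num.SetInt64(1) // the increase is at least 1 wei
		}
		return num.Add(parentBaseFee, num)
	}
	num.SetUint64(parentGasTarget - parentGasUsed)
	num.Mul(num, parentBaseFee)
	num.Div(num, new(big.Int).SetUint64(parentGasTarget))
	num.Div(num, big.NewInt(baseFeeChangeDenominator))
	fee := new(big.Int).Sub(parentBaseFee, num)
	if fee.Sign() < 0 {
		fee.SetInt64(0) // the base fee never goes negative
	}
	return fee
}

func main() {
	// Parent: 1 gwei base fee, 30M gas limit (15M target), fully used.
	// Delta = 1e9 * 15e6 / 15e6 / 8 = 125,000,000 wei, so the next
	// base fee is 1.125 gwei.
	fee := nextBaseFee(big.NewInt(1_000_000_000), 30_000_000, 30_000_000)
	fmt.Println(fee) // 1125000000
}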

View File

@ -35,7 +35,7 @@ func VerifyForkHashes(config *params.ChainConfig, header *types.Header, uncle bo
// If the homestead reprice hash is set, validate it // If the homestead reprice hash is set, validate it
if config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 { if config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 {
if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() { if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() {
return fmt.Errorf("homestead gas reprice fork: have 0x%x, want 0x%x", header.Hash(), config.EIP150Hash) return fmt.Errorf("homestead gas reprice fork: have %#x, want %#x", header.Hash(), config.EIP150Hash)
} }
} }
// All ok, return // All ok, return

View File

@ -143,7 +143,7 @@ func (p *terminalPrompter) PromptPassword(prompt string) (passwd string, err err
// choice to be made, returning that choice. // choice to be made, returning that choice.
func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) { func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) {
input, err := p.Prompt(prompt + " [y/n] ") input, err := p.Prompt(prompt + " [y/n] ")
if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" { if len(input) > 0 && strings.EqualFold(input[:1], "y") {
return true, nil return true, nil
} }
return false, err return false, err

View File

@ -103,44 +103,9 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
} }
// CalcGasLimit computes the gas limit of the next block after parent. It aims // CalcGasLimit computes the gas limit of the next block after parent. It aims
// to keep the baseline gas above the provided floor, and increase it towards the // to keep the baseline gas close to the provided target, and increase it towards
// ceil if the blocks are full. If the ceil is exceeded, it will always decrease // the target if the baseline gas is lower.
// the gas allowance. func CalcGasLimit(parentGasLimit, desiredLimit uint64) uint64 {
func CalcGasLimit(parentGasUsed, parentGasLimit, gasFloor, gasCeil uint64) uint64 {
// contrib = (parentGasUsed * 3 / 2) / 1024
contrib := (parentGasUsed + parentGasUsed/2) / params.GasLimitBoundDivisor
// decay = parentGasLimit / 1024 -1
decay := parentGasLimit/params.GasLimitBoundDivisor - 1
/*
strategy: gasLimit of block-to-mine is set based on parent's
gasUsed value. if parentGasUsed > parentGasLimit * (2/3) then we
increase it, otherwise lower it (or leave it unchanged if it's right
at that usage) the amount increased/decreased depends on how far away
from parentGasLimit * (2/3) parentGasUsed is.
*/
limit := parentGasLimit - decay + contrib
if limit < params.MinGasLimit {
limit = params.MinGasLimit
}
// If we're outside our allowed gas range, we try to hone towards them
if limit < gasFloor {
limit = parentGasLimit + decay
if limit > gasFloor {
limit = gasFloor
}
} else if limit > gasCeil {
limit = parentGasLimit - decay
if limit < gasCeil {
limit = gasCeil
}
}
return limit
}
// CalcGasLimit1559 calculates the next block gas limit under 1559 rules.
func CalcGasLimit1559(parentGasLimit, desiredLimit uint64) uint64 {
delta := parentGasLimit/params.GasLimitBoundDivisor - 1 delta := parentGasLimit/params.GasLimitBoundDivisor - 1
limit := parentGasLimit limit := parentGasLimit
if desiredLimit < params.MinGasLimit { if desiredLimit < params.MinGasLimit {
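The rewritten CalcGasLimit above nudges the limit from the parent value toward a single desired target, stepping by at most parentGasLimit/1024 - 1 per block. A hedged standalone sketch of that bounded step, with the bound divisor (1024) and minimum gas limit (5000) inlined as assumptions; the real function reads them from params and continues past the context shown above.

package main

import "fmt"

// stepGasLimit moves parentGasLimit toward desiredLimit by at most
// parentGasLimit/1024 - 1, mirroring the bounded adjustment above.
func stepGasLimit(parentGasLimit, desiredLimit uint64) uint64 {
	const (
		boundDivisor = 1024
		minGasLimit  = 5000
	)
	delta := parentGasLimit/boundDivisor - 1
	limit := parentGasLimit
	if desiredLimit < minGasLimit {
		desiredLimit = minGasLimit
	}
	if limit < desiredLimit {
		limit = parentGasLimit + delta
		if limit > desiredLimit {
			limit = desiredLimit
		}
		return limit
	}
	if limit > desiredLimit {
		limit = parentGasLimit - delta
		if limit < desiredLimit {
			limit = desiredLimit
		}
	}
	return limit
}

func main() {
	// From 15M toward a 30M target the limit can grow by at most
	// 15_000_000/1024 - 1 = 14_647 gas per block.
	fmt.Println(stepGasLimit(15_000_000, 30_000_000)) // 15014647
}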

File diff suppressed because it is too large

View File

@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed // report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message. // or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize) { func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) {
// Fetch the timings for the batch // Fetch the timings for the batch
var ( var (
now = mclock.Now() now = mclock.Now()
@ -56,9 +56,9 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor
// Assemble the log context and send it to the logger // Assemble the log context and send it to the logger
context := []interface{}{ context := []interface{}{
"number", end.Number(), "hash", end.Hash(),
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000, "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
"number", end.Number(), "hash", end.Hash(),
} }
if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute { if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
@ -71,8 +71,11 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor
if st.ignored > 0 { if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...) context = append(context, []interface{}{"ignored", st.ignored}...)
} }
if setHead {
log.Info("Imported new chain segment", context...) log.Info("Imported new chain segment", context...)
} else {
log.Info("Imported new potential chain segment", context...)
}
// Bump the stats reported to the next section // Bump the stats reported to the next section
*st = insertStats{startTime: now, lastIndex: index + 1} *st = insertStats{startTime: now, lastIndex: index + 1}
} }
@ -150,6 +153,14 @@ func (it *insertIterator) previous() *types.Header {
return it.chain[it.index-1].Header() return it.chain[it.index-1].Header()
} }
// current returns the current header that is being processed, or nil.
func (it *insertIterator) current() *types.Header {
if it.index == -1 || it.index >= len(it.chain) {
return nil
}
return it.chain[it.index].Header()
}
// first returns the first block in the it. // first returns the first block in the it.
func (it *insertIterator) first() *types.Block { func (it *insertIterator) first() *types.Block {
return it.chain[0] return it.chain[0]

View File

@ -0,0 +1,408 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
return bc.hc.CurrentHeader()
}
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
return bc.currentBlock.Load().(*types.Block)
}
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
return bc.currentFastBlock.Load().(*types.Block)
}
// CurrentFinalizedBlock retrieves the current finalized block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFinalizedBlock() *types.Block {
return bc.currentFinalizedBlock.Load().(*types.Block)
}
// CurrentSafeBlock retrieves the current safe block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentSafeBlock() *types.Block {
return bc.currentSafeBlock.Load().(*types.Block)
}
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
return bc.hc.HasHeader(hash, number)
}
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
return bc.hc.GetHeader(hash, number)
}
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
return bc.hc.GetHeaderByHash(hash)
}
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
return bc.hc.GetHeaderByNumber(number)
}
// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
// backwards from the given number.
func (bc *BlockChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
return bc.hc.GetHeadersFrom(number, count)
}
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := bc.bodyCache.Get(hash); ok {
body := cached.(*types.Body)
return body
}
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
body := rawdb.ReadBody(bc.db, hash, *number)
if body == nil {
return nil
}
// Cache the found body for next time and return
bc.bodyCache.Add(hash, body)
return body
}
// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := bc.bodyRLPCache.Get(hash); ok {
return cached.(rlp.RawValue)
}
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
body := rawdb.ReadBodyRLP(bc.db, hash, *number)
if len(body) == 0 {
return nil
}
// Cache the found body for next time and return
bc.bodyRLPCache.Add(hash, body)
return body
}
// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
if bc.blockCache.Contains(hash) {
return true
}
if !bc.HasHeader(hash, number) {
return false
}
return rawdb.HasBody(bc.db, hash, number)
}
// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
if !bc.HasBlock(hash, number) {
return false
}
if bc.receiptsCache.Contains(hash) {
return true
}
return rawdb.HasReceipts(bc.db, hash, number)
}
// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
// Short circuit if the block's already in the cache, retrieve otherwise
if block, ok := bc.blockCache.Get(hash); ok {
return block.(*types.Block)
}
block := rawdb.ReadBlock(bc.db, hash, number)
if block == nil {
return nil
}
// Cache the found block for next time and return
bc.blockCache.Add(block.Hash(), block)
return block
}
// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
return bc.GetBlock(hash, *number)
}
// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
hash := rawdb.ReadCanonicalHash(bc.db, number)
if hash == (common.Hash{}) {
return nil
}
return bc.GetBlock(hash, number)
}
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
for i := 0; i < n; i++ {
block := bc.GetBlock(hash, *number)
if block == nil {
break
}
blocks = append(blocks, block)
hash = block.ParentHash()
*number--
}
return
}
// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
if receipts, ok := bc.receiptsCache.Get(hash); ok {
return receipts.(types.Receipts)
}
number := rawdb.ReadHeaderNumber(bc.db, hash)
if number == nil {
return nil
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
if receipts == nil {
return nil
}
bc.receiptsCache.Add(hash, receipts)
return receipts
}
// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
uncles := []*types.Header{}
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
}
return uncles
}
// GetCanonicalHash returns the canonical hash for a given block number
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
return bc.hc.GetCanonicalHash(number)
}
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}
// GetTransactionLookup retrieves the lookup associate with the given transaction
// hash from the cache or database.
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
// Short circuit if the txlookup already in the cache, retrieve otherwise
if lookup, exist := bc.txLookupCache.Get(hash); exist {
return lookup.(*rawdb.LegacyTxLookupEntry)
}
tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
if tx == nil {
return nil
}
lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
bc.txLookupCache.Add(hash, lookup)
return lookup
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
return bc.hc.GetTd(hash, number)
}
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
_, err := bc.stateCache.OpenTrie(hash)
return err == nil
}
// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
// Check first that the block itself is known
block := bc.GetBlock(hash, number)
if block == nil {
return false
}
return bc.HasState(block.Root())
}
// TrieNode retrieves a blob of data associated with a trie node
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
return bc.stateCache.TrieDB().Node(hash)
}
// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
return bc.stateCache.ContractCode(common.Hash{}, hash)
}
// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
type codeReader interface {
ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
}
return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}
// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
return bc.StateAt(bc.CurrentBlock().Root())
}
// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, bc.stateCache, bc.snaps)
}
// Config retrieves the chain's fork configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
// Snapshots returns the blockchain snapshot tree.
func (bc *BlockChain) Snapshots() *snapshot.Tree {
return bc.snaps
}
// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
return bc.validator
}
// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
return bc.processor
}
// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
return bc.stateCache
}
// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
return bc.CurrentBlock().GasLimit()
}
// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
return bc.genesisBlock
}
// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
return &bc.vmConfig
}
// SetTxLookupLimit is responsible for updating the txlookup limit to the
// original one stored in db if the new mismatches with the old one.
func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
bc.txLookupLimit = limit
}
// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
// stale transaction indices.
func (bc *BlockChain) TxLookupLimit() uint64 {
return bc.txLookupLimit
}
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}
// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}
// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}
// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}
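The reader file above is mostly thin accessors over the header chain and the internal caches; GetBlocksFromHash, for instance, just walks parent hashes backwards. A hedged sketch of the same walk written against a minimal interface, so it does not require constructing a full BlockChain; the interface method mirrors the accessor above, and the in-memory fake exists only to make the example runnable.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// blockReader is the small slice of the accessor surface above that an
// ancestor walk needs; *core.BlockChain satisfies it via GetBlockByHash.
type blockReader interface {
	GetBlockByHash(hash common.Hash) *types.Block
}

// ancestors returns up to n blocks starting at hash and walking parents,
// the same shape as GetBlocksFromHash above.
func ancestors(r blockReader, hash common.Hash, n int) []*types.Block {
	var blocks []*types.Block
	for i := 0; i < n; i++ {
		block := r.GetBlockByHash(hash)
		if block == nil {
			break // fell off the known chain
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
	}
	return blocks
}

// mapReader is a toy in-memory chain keyed by block hash.
type mapReader map[common.Hash]*types.Block

func (m mapReader) GetBlockByHash(h common.Hash) *types.Block { return m[h] }

func main() {
	chain := map[common.Hash]*types.Block{}
	var parent, head common.Hash
	for i := int64(0); i < 3; i++ {
		b := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(i), ParentHash: parent})
		chain[b.Hash()] = b
		parent, head = b.Hash(), b.Hash()
	}
	got := ancestors(mapReader(chain), head, 10)
	fmt.Println("walked", len(got), "blocks") // walked 3 blocks
}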

View File

@ -75,7 +75,7 @@ func (b *BloomIndexer) Process(ctx context.Context, header *types.Header) error
// Commit implements core.ChainIndexerBackend, finalizing the bloom section and // Commit implements core.ChainIndexerBackend, finalizing the bloom section and
// writing it out into the database. // writing it out into the database.
func (b *BloomIndexer) Commit() error { func (b *BloomIndexer) Commit() error {
batch := b.db.NewBatch() batch := b.db.NewBatchWithSize((int(b.size) / 8) * types.BloomBitLength)
for i := 0; i < types.BloomBitLength; i++ { for i := 0; i < types.BloomBitLength; i++ {
bits, err := b.gen.Bitset(uint(i)) bits, err := b.gen.Bitset(uint(i))
if err != nil { if err != nil {

View File

@ -511,7 +511,8 @@ type MatcherSession struct {
quit chan struct{} // Quit channel to request pipeline termination quit chan struct{} // Quit channel to request pipeline termination
ctx context.Context // Context used by the light client to abort filtering ctx context.Context // Context used by the light client to abort filtering
err atomic.Value // Global error to track retrieval failures deep in the chain err error // Global error to track retrieval failures deep in the chain
errLock sync.Mutex
pend sync.WaitGroup pend sync.WaitGroup
} }
@ -529,10 +530,10 @@ func (s *MatcherSession) Close() {
// Error returns any failure encountered during the matching session. // Error returns any failure encountered during the matching session.
func (s *MatcherSession) Error() error { func (s *MatcherSession) Error() error {
if err := s.err.Load(); err != nil { s.errLock.Lock()
return err.(error) defer s.errLock.Unlock()
}
return nil return s.err
} }
// allocateRetrieval assigns a bloom bit index to a client process that can either // allocateRetrieval assigns a bloom bit index to a client process that can either
@ -611,7 +612,7 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
return return
case <-time.After(wait): case <-time.After(wait):
// Throttling up, fetch whatever's available // Throttling up, fetch whatever is available
} }
} }
// Allocate as much as we can handle and request servicing // Allocate as much as we can handle and request servicing
@ -630,7 +631,9 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
result := <-request result := <-request
if result.Error != nil { if result.Error != nil {
s.err.Store(result.Error) s.errLock.Lock()
s.err = result.Error
s.errLock.Unlock()
s.Close() s.Close()
} }
s.deliverSections(result.Bit, result.Sections, result.Bitsets) s.deliverSections(result.Bit, result.Sections, result.Bitsets)
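The matcher hunk above replaces the atomic.Value holding the session error with a plain error guarded by a mutex, avoiding the interface boxing and type assertion around Load/Store. A minimal sketch of that pattern as a reusable error holder; the type below is illustrative only and keeps the first reported failure, whereas the session above simply records whatever failure arrives.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// errorHolder records a failure reported by any goroutine, the same shape as
// the err/errLock pair added to MatcherSession above.
type errorHolder struct {
	mu  sync.Mutex
	err error
}

func (h *errorHolder) Set(err error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.err == nil {
		h.err = err // keep the first failure
	}
}

func (h *errorHolder) Get() error {
	h.mu.Lock()
	defer h.mu.Unlock()
	return h.err
}

func main() {
	var (
		holder errorHolder
		wg     sync.WaitGroup
	)
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if i == 2 {
				holder.Set(errors.New("retrieval failed"))
			}
		}(i)
	}
	wg.Wait()
	fmt.Println("session error:", holder.Get())
}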

View File

@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
@ -155,6 +156,28 @@ func (b *BlockGen) TxNonce(addr common.Address) uint64 {
// AddUncle adds an uncle header to the generated block. // AddUncle adds an uncle header to the generated block.
func (b *BlockGen) AddUncle(h *types.Header) { func (b *BlockGen) AddUncle(h *types.Header) {
// The uncle will have the same timestamp and auto-generated difficulty
h.Time = b.header.Time
var parent *types.Header
for i := b.i - 1; i >= 0; i-- {
if b.chain[i].Hash() == h.ParentHash {
parent = b.chain[i].Header()
break
}
}
chainreader := &fakeChainReader{config: b.config}
h.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, parent)
// The gas limit and price should be derived from the parent
h.GasLimit = parent.GasLimit
if b.config.IsLondon(h.Number) {
h.BaseFee = misc.CalcBaseFee(b.config, parent)
if !b.config.IsLondon(parent.Number) {
parentGasLimit := parent.GasLimit * params.ElasticityMultiplier
h.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit)
}
}
b.uncles = append(b.uncles, h) b.uncles = append(b.uncles, h)
} }
@ -205,6 +228,18 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
b.header = makeHeader(chainreader, parent, statedb, b.engine) b.header = makeHeader(chainreader, parent, statedb, b.engine)
// Set the difficulty for clique block. The chain maker doesn't have access
// to a chain, so the difficulty will be left unset (nil). Set it here to the
// correct value.
if b.header.Difficulty == nil {
if config.TerminalTotalDifficulty == nil {
// Clique chain
b.header.Difficulty = big.NewInt(2)
} else {
// Post-merge chain
b.header.Difficulty = big.NewInt(0)
}
}
// Mutate the state and block according to any hard-fork specs // Mutate the state and block according to any hard-fork specs
if daoBlock := config.DAOForkBlock; daoBlock != nil { if daoBlock := config.DAOForkBlock; daoBlock != nil {
limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
@ -250,6 +285,19 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
return blocks, receipts return blocks, receipts
} }
// GenerateChainWithGenesis is a wrapper of GenerateChain which will initialize
// genesis block to database first according to the provided genesis specification
// then generate chain on top.
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) {
db := rawdb.NewMemoryDatabase()
_, err := genesis.Commit(db)
if err != nil {
panic(err)
}
blocks, receipts := GenerateChain(genesis.Config, genesis.ToBlock(), engine, db, n, gen)
return db, blocks, receipts
}
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
var time uint64 var time uint64
if parent.Time() == 0 { if parent.Time() == 0 {
@ -273,18 +321,17 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S
} }
if chain.Config().IsLondon(header.Number) { if chain.Config().IsLondon(header.Number) {
header.BaseFee = misc.CalcBaseFee(chain.Config(), parent.Header()) header.BaseFee = misc.CalcBaseFee(chain.Config(), parent.Header())
parentGasLimit := parent.GasLimit()
if !chain.Config().IsLondon(parent.Number()) { if !chain.Config().IsLondon(parent.Number()) {
parentGasLimit = parent.GasLimit() * params.ElasticityMultiplier parentGasLimit := parent.GasLimit() * params.ElasticityMultiplier
header.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit)
} }
header.GasLimit = CalcGasLimit1559(parentGasLimit, parentGasLimit)
} }
return header return header
} }
// makeHeaderChain creates a deterministic chain of headers rooted at parent. // makeHeaderChain creates a deterministic chain of headers rooted at parent.
func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Header { func makeHeaderChain(chainConfig *params.ChainConfig, parent *types.Header, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Header {
blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, engine, db, seed) blocks := makeBlockChain(chainConfig, types.NewBlockWithHeader(parent), n, engine, db, seed)
headers := make([]*types.Header, len(blocks)) headers := make([]*types.Header, len(blocks))
for i, block := range blocks { for i, block := range blocks {
headers[i] = block.Header() headers[i] = block.Header()
@ -292,14 +339,32 @@ func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db et
return headers return headers
} }
// makeHeaderChainWithGenesis creates a deterministic chain of headers from genesis.
func makeHeaderChainWithGenesis(genesis *Genesis, n int, engine consensus.Engine, seed int) (ethdb.Database, []*types.Header) {
db, blocks := makeBlockChainWithGenesis(genesis, n, engine, seed)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
return db, headers
}
// makeBlockChain creates a deterministic chain of blocks rooted at parent. // makeBlockChain creates a deterministic chain of blocks rooted at parent.
func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Block { func makeBlockChain(chainConfig *params.ChainConfig, parent *types.Block, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Block {
blocks, _ := GenerateChain(params.TestChainConfig, parent, engine, db, n, func(i int, b *BlockGen) { blocks, _ := GenerateChain(chainConfig, parent, engine, db, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)}) b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
}) })
return blocks return blocks
} }
// makeBlockChainWithGenesis creates a deterministic chain of blocks from genesis.
func makeBlockChainWithGenesis(genesis *Genesis, n int, engine consensus.Engine, seed int) (ethdb.Database, []*types.Block) {
db, blocks, _ := GenerateChainWithGenesis(genesis, engine, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
})
return db, blocks
}
type fakeChainReader struct { type fakeChainReader struct {
config *params.ChainConfig config *params.ChainConfig
} }
@ -314,3 +379,4 @@ func (cr *fakeChainReader) GetHeaderByNumber(number uint64) *types.Header
func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil } func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil }
func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil } func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil }
func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil } func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil }
func (cr *fakeChainReader) GetTd(hash common.Hash, number uint64) *big.Int { return nil }
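GenerateChainWithGenesis above commits the genesis spec to a fresh in-memory database and then generates blocks on top of it, so callers no longer manage the database and genesis commit themselves. A hedged usage sketch with a faked ethash engine; the pre-funded address, params.TestChainConfig and ethash.NewFaker are assumptions about the vendored tree, while the core names come from the hunks above.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Genesis with one pre-funded account (address chosen arbitrarily).
	gspec := &core.Genesis{
		Config: params.TestChainConfig,
		Alloc: core.GenesisAlloc{
			common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): {
				Balance: big.NewInt(1_000_000_000_000_000_000),
			},
		},
	}

	// Build 5 empty blocks on top of the committed genesis; the returned
	// database already contains the genesis state.
	db, blocks, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 5, func(i int, b *core.BlockGen) {
		b.SetCoinbase(common.Address{1})
	})
	_ = db

	fmt.Println("head number:", blocks[len(blocks)-1].NumberU64()) // head number: 5
}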

View File

@ -26,11 +26,13 @@ var (
// ErrKnownBlock is returned when a block to import is already known locally. // ErrKnownBlock is returned when a block to import is already known locally.
ErrKnownBlock = errors.New("block already known") ErrKnownBlock = errors.New("block already known")
// ErrBlacklistedHash is returned if a block to import is on the blacklist. // ErrBannedHash is returned if a block to import is on the banned list.
ErrBlacklistedHash = errors.New("blacklisted hash") ErrBannedHash = errors.New("banned hash")
// ErrNoGenesis is returned when there is no Genesis Block. // ErrNoGenesis is returned when there is no Genesis Block.
ErrNoGenesis = errors.New("genesis not found in chain") ErrNoGenesis = errors.New("genesis not found in chain")
errSideChainReceipts = errors.New("side blocks can't be accepted as ancient chain data")
) )
// List of evm-call-message pre-checking errors. All state transition messages will // List of evm-call-message pre-checking errors. All state transition messages will
@ -49,6 +51,10 @@ var (
// next one expected based on the local chain. // next one expected based on the local chain.
ErrNonceTooHigh = errors.New("nonce too high") ErrNonceTooHigh = errors.New("nonce too high")
// ErrNonceMax is returned if the nonce of a transaction sender account has
// maximum allowed value and would become invalid if incremented.
ErrNonceMax = errors.New("nonce has max value")
// ErrGasLimitReached is returned by the gas pool if the amount of gas required // ErrGasLimitReached is returned by the gas pool if the amount of gas required
// by a transaction is higher than what's left in the block. // by a transaction is higher than what's left in the block.
ErrGasLimitReached = errors.New("gas limit reached") ErrGasLimitReached = errors.New("gas limit reached")
@ -85,6 +91,9 @@ var (
ErrFeeCapVeryHigh = errors.New("max fee per gas higher than 2^256-1") ErrFeeCapVeryHigh = errors.New("max fee per gas higher than 2^256-1")
// ErrFeeCapTooLow is returned if the transaction fee cap is less than the // ErrFeeCapTooLow is returned if the transaction fee cap is less than the
// the base fee of the block. // base fee of the block.
ErrFeeCapTooLow = errors.New("max fee per gas less than block base fee") ErrFeeCapTooLow = errors.New("max fee per gas less than block base fee")
// ErrSenderNoEOA is returned if the sender of a transaction is a contract.
ErrSenderNoEOA = errors.New("sender not an eoa")
) )

View File

@ -31,7 +31,7 @@ type ChainContext interface {
// Engine retrieves the chain's consensus engine. // Engine retrieves the chain's consensus engine.
Engine() consensus.Engine Engine() consensus.Engine
// GetHeader returns the hash corresponding to their hash. // GetHeader returns the header corresponding to the hash/number argument pair.
GetHeader(common.Hash, uint64) *types.Header GetHeader(common.Hash, uint64) *types.Header
} }
@ -40,6 +40,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
var ( var (
beneficiary common.Address beneficiary common.Address
baseFee *big.Int baseFee *big.Int
random *common.Hash
) )
// If we don't have an explicit author (i.e. not mining), extract from the header // If we don't have an explicit author (i.e. not mining), extract from the header
@ -51,6 +52,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
if header.BaseFee != nil { if header.BaseFee != nil {
baseFee = new(big.Int).Set(header.BaseFee) baseFee = new(big.Int).Set(header.BaseFee)
} }
if header.Difficulty.Cmp(common.Big0) == 0 {
random = &header.MixDigest
}
return vm.BlockContext{ return vm.BlockContext{
CanTransfer: CanTransfer, CanTransfer: CanTransfer,
Transfer: Transfer, Transfer: Transfer,
@ -61,6 +65,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
Difficulty: new(big.Int).Set(header.Difficulty), Difficulty: new(big.Int).Set(header.Difficulty),
BaseFee: baseFee, BaseFee: baseFee,
GasLimit: header.GasLimit, GasLimit: header.GasLimit,
Random: random,
} }
} }
@ -79,6 +84,11 @@ func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash
var cache []common.Hash var cache []common.Hash
return func(n uint64) common.Hash { return func(n uint64) common.Hash {
if ref.Number.Uint64() <= n {
// This situation can happen if we're doing tracing and using
// block overrides.
return common.Hash{}
}
// If there's no hash cache yet, make one // If there's no hash cache yet, make one
if len(cache) == 0 { if len(cache) == 0 {
cache = append(cache, ref.ParentHash) cache = append(cache, ref.ParentHash)
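NewEVMBlockContext above starts passing a Random value to the EVM: when the header difficulty is zero (a post-merge block), MixDigest carries the beacon-chain randomness that PREVRANDAO exposes, and for proof-of-work headers no Random is set. A small sketch of just that selection rule, written against types.Header only.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// blockRandom mirrors the rule in the hunk above: only post-merge headers
// (difficulty zero) expose MixDigest as the EVM's Random/PREVRANDAO value.
func blockRandom(header *types.Header) *common.Hash {
	if header.Difficulty.Cmp(common.Big0) == 0 {
		return &header.MixDigest
	}
	return nil
}

func main() {
	pow := &types.Header{Difficulty: big.NewInt(131072), MixDigest: common.Hash{0xaa}}
	pos := &types.Header{Difficulty: big.NewInt(0), MixDigest: common.Hash{0xbb}}

	fmt.Println("pow random:", blockRandom(pow)) // <nil>
	fmt.Println("pos random:", blockRandom(pos)) // non-nil, equals MixDigest
}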

View File

@ -0,0 +1,108 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
crand "crypto/rand"
"errors"
"math/big"
mrand "math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
// ChainReader defines a small collection of methods needed to access the local
// blockchain during header verification. It's implemented by both blockchain
// and lightchain.
type ChainReader interface {
// Config retrieves the header chain's chain configuration.
Config() *params.ChainConfig
// GetTd returns the total difficulty of a local block.
GetTd(common.Hash, uint64) *big.Int
}
// ForkChoice is the fork chooser based on the highest total difficulty of the
// chain (the fork choice used in eth1) and the external fork choice (the fork
// choice used in eth2). The main goal of this ForkChoice is not only to offer
// fork choice during the eth1/2 merge phase, but also to keep compatibility with
// all other proof-of-work networks.
type ForkChoice struct {
chain ChainReader
rand *mrand.Rand
// preserve is a helper function used in td fork choice.
// Miners will prefer to choose the local mined block if the
// local td is equal to the extern one. It can be nil for light
// client
preserve func(header *types.Header) bool
}
func NewForkChoice(chainReader ChainReader, preserve func(header *types.Header) bool) *ForkChoice {
// Seed a fast but crypto originating random generator
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
log.Crit("Failed to initialize random seed", "err", err)
}
return &ForkChoice{
chain: chainReader,
rand: mrand.New(mrand.NewSource(seed.Int64())),
preserve: preserve,
}
}
// ReorgNeeded returns whether the reorg should be applied
// based on the given external header and local canonical chain.
// In the td mode, the new head is chosen if the corresponding
// total difficulty is higher. In the extern mode, the trusted
// header is always selected as the head.
func (f *ForkChoice) ReorgNeeded(current *types.Header, header *types.Header) (bool, error) {
var (
localTD = f.chain.GetTd(current.Hash(), current.Number.Uint64())
externTd = f.chain.GetTd(header.Hash(), header.Number.Uint64())
)
if localTD == nil || externTd == nil {
return false, errors.New("missing td")
}
// Accept the new header as the chain head if the transition
// is already triggered. We assume all the headers after the
// transition come from the trusted consensus layer.
if ttd := f.chain.Config().TerminalTotalDifficulty; ttd != nil && ttd.Cmp(externTd) <= 0 {
return true, nil
}
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
reorg := externTd.Cmp(localTD) > 0
if !reorg && externTd.Cmp(localTD) == 0 {
number, headNumber := header.Number.Uint64(), current.Number.Uint64()
if number < headNumber {
reorg = true
} else if number == headNumber {
var currentPreserve, externPreserve bool
if f.preserve != nil {
currentPreserve, externPreserve = f.preserve(current), f.preserve(header)
}
reorg = !currentPreserve && (externPreserve || f.rand.Float64() < 0.5)
}
}
return reorg, nil
}
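ReorgNeeded above prefers the header with the higher total difficulty; on a TD tie it prefers the lower block number, and only then falls back to the preserve callback plus a coin flip. A hedged standalone sketch of just that tie-break, with the terminal-total-difficulty check and the randomness source simplified away.

package main

import "fmt"

// preferExternal reproduces the equal-TD tie-break from ReorgNeeded above:
// the lower block number wins, otherwise coinFlip stands in for the preserve
// callback combined with the 50% random choice.
func preferExternal(localNumber, externNumber uint64, coinFlip bool) bool {
	if externNumber < localNumber {
		return true // same work in fewer blocks: take it
	}
	if externNumber == localNumber {
		return coinFlip
	}
	return false
}

func main() {
	fmt.Println(preferExternal(100, 99, false)) // true: shorter chain at equal TD
	fmt.Println(preferExternal(100, 100, true)) // true: coin flip picked the peer's block
	fmt.Println(preferExternal(100, 101, true)) // false: longer chain at equal TD is never preferred
}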

View File

@ -1,256 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package forkid implements EIP-2124 (https://eips.ethereum.org/EIPS/eip-2124).
package forkid
import (
"encoding/binary"
"errors"
"hash/crc32"
"math"
"math/big"
"reflect"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
var (
// ErrRemoteStale is returned by the validator if a remote fork checksum is a
// subset of our already applied forks, but the announced next fork block is
// not on our already passed chain.
ErrRemoteStale = errors.New("remote needs update")
// ErrLocalIncompatibleOrStale is returned by the validator if a remote fork
// checksum does not match any local checksum variation, signalling that the
// two chains have diverged in the past at some point (possibly at genesis).
ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
)
// Blockchain defines all necessary methods to build a forkID.
type Blockchain interface {
// Config retrieves the chain's fork configuration.
Config() *params.ChainConfig
// Genesis retrieves the chain's genesis block.
Genesis() *types.Block
// CurrentHeader retrieves the current head header of the canonical chain.
CurrentHeader() *types.Header
}
// ID is a fork identifier as defined by EIP-2124.
type ID struct {
Hash [4]byte // CRC32 checksum of the genesis block and passed fork block numbers
Next uint64 // Block number of the next upcoming fork, or 0 if no forks are known
}
// Filter is a fork id filter to validate a remotely advertised ID.
type Filter func(id ID) error
// NewID calculates the Ethereum fork ID from the chain config, genesis hash, and head.
func NewID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
// Calculate the starting checksum from the genesis hash
hash := crc32.ChecksumIEEE(genesis[:])
// Calculate the current fork checksum and the next fork block
var next uint64
for _, fork := range gatherForks(config) {
if fork <= head {
// Fork already passed, checksum the previous hash and the fork number
hash = checksumUpdate(hash, fork)
continue
}
next = fork
break
}
return ID{Hash: checksumToBytes(hash), Next: next}
}
// NewIDWithChain calculates the Ethereum fork ID from an existing chain instance.
func NewIDWithChain(chain Blockchain) ID {
return NewID(
chain.Config(),
chain.Genesis().Hash(),
chain.CurrentHeader().Number.Uint64(),
)
}
// NewFilter creates a filter that returns if a fork ID should be rejected or not
// based on the local chain's status.
func NewFilter(chain Blockchain) Filter {
return newFilter(
chain.Config(),
chain.Genesis().Hash(),
func() uint64 {
return chain.CurrentHeader().Number.Uint64()
},
)
}
// NewStaticFilter creates a filter at block zero.
func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
head := func() uint64 { return 0 }
return newFilter(config, genesis, head)
}
// newFilter is the internal version of NewFilter, taking closures as its arguments
// instead of a chain. The reason is to allow testing it without having to simulate
// an entire blockchain.
func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
// Calculate all the valid fork hash and fork next combos
var (
forks = gatherForks(config)
sums = make([][4]byte, len(forks)+1) // 0th is the genesis
)
hash := crc32.ChecksumIEEE(genesis[:])
sums[0] = checksumToBytes(hash)
for i, fork := range forks {
hash = checksumUpdate(hash, fork)
sums[i+1] = checksumToBytes(hash)
}
// Add two sentries to simplify the fork checks and don't require special
// casing the last one.
forks = append(forks, math.MaxUint64) // Last fork will never be passed
// Create a validator that will filter out incompatible chains
return func(id ID) error {
// Run the fork checksum validation ruleset:
// 1. If local and remote FORK_CSUM matches, compare local head to FORK_NEXT.
// The two nodes are in the same fork state currently. They might know
// of differing future forks, but that's not relevant until the fork
// triggers (might be postponed, nodes might be updated to match).
// 1a. A remotely announced but remotely not passed block is already passed
// locally, disconnect, since the chains are incompatible.
// 1b. No remotely announced fork; or not yet passed locally, connect.
// 2. If the remote FORK_CSUM is a subset of the local past forks and the
// remote FORK_NEXT matches with the locally following fork block number,
// connect.
// Remote node is currently syncing. It might eventually diverge from
// us, but at this current point in time we don't have enough information.
// 3. If the remote FORK_CSUM is a superset of the local past forks and can
// be completed with locally known future forks, connect.
// Local node is currently syncing. It might eventually diverge from
// the remote, but at this current point in time we don't have enough
// information.
// 4. Reject in all other cases.
head := headfn()
for i, fork := range forks {
// If our head is beyond this fork, continue to the next (we have a dummy
// fork of maxuint64 as the last item to always fail this check eventually).
if head >= fork {
continue
}
// Found the first unpassed fork block, check if our current state matches
// the remote checksum (rule #1).
if sums[i] == id.Hash {
// Fork checksum matched, check if a remote future fork block already passed
// locally without the local node being aware of it (rule #1a).
if id.Next > 0 && head >= id.Next {
return ErrLocalIncompatibleOrStale
}
// Haven't passed locally a remote-only fork, accept the connection (rule #1b).
return nil
}
// The local and remote nodes are in different forks currently, check if the
// remote checksum is a subset of our local forks (rule #2).
for j := 0; j < i; j++ {
if sums[j] == id.Hash {
// Remote checksum is a subset, validate based on the announced next fork
if forks[j] != id.Next {
return ErrRemoteStale
}
return nil
}
}
// Remote chain is not a subset of our local one, check if it's a superset by
// any chance, signalling that we're simply out of sync (rule #3).
for j := i + 1; j < len(sums); j++ {
if sums[j] == id.Hash {
// Yay, remote checksum is a superset, ignore upcoming forks
return nil
}
}
// No exact, subset or superset match. We are on differing chains, reject.
return ErrLocalIncompatibleOrStale
}
log.Error("Impossible fork ID validation", "id", id)
return nil // Something's very wrong, accept rather than reject
}
}
// checksumUpdate calculates the next IEEE CRC32 checksum based on the previous
// one and a fork block number (equivalent to CRC32(original-blob || fork)).
func checksumUpdate(hash uint32, fork uint64) uint32 {
var blob [8]byte
binary.BigEndian.PutUint64(blob[:], fork)
return crc32.Update(hash, crc32.IEEETable, blob[:])
}
// checksumToBytes converts a uint32 checksum into a [4]byte array.
func checksumToBytes(hash uint32) [4]byte {
var blob [4]byte
binary.BigEndian.PutUint32(blob[:], hash)
return blob
}
// gatherForks gathers all the known forks and creates a sorted list out of them.
func gatherForks(config *params.ChainConfig) []uint64 {
// Gather all the fork block numbers via reflection
kind := reflect.TypeOf(params.ChainConfig{})
conf := reflect.ValueOf(config).Elem()
var forks []uint64
for i := 0; i < kind.NumField(); i++ {
// Fetch the next field and skip non-fork rules
field := kind.Field(i)
if !strings.HasSuffix(field.Name, "Block") {
continue
}
if field.Type != reflect.TypeOf(new(big.Int)) {
continue
}
// Extract the fork rule block number and aggregate it
rule := conf.Field(i).Interface().(*big.Int)
if rule != nil {
forks = append(forks, rule.Uint64())
}
}
// Sort the fork block numbers to permit chronological XOR
for i := 0; i < len(forks); i++ {
for j := i + 1; j < len(forks); j++ {
if forks[i] > forks[j] {
forks[i], forks[j] = forks[j], forks[i]
}
}
}
// Deduplicate block numbers applying multiple forks
for i := 1; i < len(forks); i++ {
if forks[i] == forks[i-1] {
forks = append(forks[:i], forks[i+1:]...)
i--
}
}
// Skip any forks in block 0, that's the genesis ruleset
if len(forks) > 0 && forks[0] == 0 {
forks = forks[1:]
}
return forks
}
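A hedged sketch of gatherForks' sort/dedup/genesis-skip behaviour, written as if it sat in the same package; the fork heights are made up.
// demoGatherForks is illustrative only and not part of this diff.
func demoGatherForks() []uint64 {
	cfg := &params.ChainConfig{
		HomesteadBlock: big.NewInt(0),    // block-0 rule: part of the genesis ruleset, skipped
		EIP150Block:    big.NewInt(1000), // two rules at the same height ...
		EIP155Block:    big.NewInt(1000), // ... collapse into a single entry
		ByzantiumBlock: big.NewInt(4000),
	}
	// Expected result: [1000 4000] - sorted, deduplicated, genesis rule dropped.
	return gatherForks(cfg)
}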

View File

@ -39,8 +39,8 @@ import (
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
//go:generate gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go //go:generate go run github.com/fjl/gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go
//go:generate gencodec -type GenesisAccount -field-override genesisAccountMarshaling -out gen_genesis_account.go //go:generate go run github.com/fjl/gencodec -type GenesisAccount -field-override genesisAccountMarshaling -out gen_genesis_account.go
var errGenesisNoConfig = errors.New("genesis has no chain configuration") var errGenesisNoConfig = errors.New("genesis has no chain configuration")
@ -65,6 +65,41 @@ type Genesis struct {
BaseFee *big.Int `json:"baseFeePerGas"` BaseFee *big.Int `json:"baseFeePerGas"`
} }
func ReadGenesis(db ethdb.Database) (*Genesis, error) {
var genesis Genesis
stored := rawdb.ReadCanonicalHash(db, 0)
if (stored == common.Hash{}) {
return nil, fmt.Errorf("invalid genesis hash in database: %x", stored)
}
blob := rawdb.ReadGenesisStateSpec(db, stored)
if blob == nil {
return nil, fmt.Errorf("genesis state missing from db")
}
if len(blob) != 0 {
if err := genesis.Alloc.UnmarshalJSON(blob); err != nil {
return nil, fmt.Errorf("could not unmarshal genesis state json: %s", err)
}
}
genesis.Config = rawdb.ReadChainConfig(db, stored)
if genesis.Config == nil {
return nil, fmt.Errorf("genesis config missing from db")
}
genesisBlock := rawdb.ReadBlock(db, stored, 0)
if genesisBlock == nil {
return nil, fmt.Errorf("genesis block missing from db")
}
genesisHeader := genesisBlock.Header()
genesis.Nonce = genesisHeader.Nonce.Uint64()
genesis.Timestamp = genesisHeader.Time
genesis.ExtraData = genesisHeader.Extra
genesis.GasLimit = genesisHeader.GasLimit
genesis.Difficulty = genesisHeader.Difficulty
genesis.Mixhash = genesisHeader.MixDigest
genesis.Coinbase = genesisHeader.Coinbase
return &genesis, nil
}
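A hedged caller-side sketch of the new ReadGenesis helper (the wrapper name is an assumption): recover the genesis specification from an already-initialised database and log a few of its fields.
// inspectGenesis is illustrative only and not part of this diff.
func inspectGenesis(db ethdb.Database) error {
	genesis, err := ReadGenesis(db)
	if err != nil {
		return err
	}
	log.Info("Recovered genesis", "chainID", genesis.Config.ChainID, "gasLimit", genesis.GasLimit, "timestamp", genesis.Timestamp)
	return nil
}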
// GenesisAlloc specifies the initial state that is part of the genesis block. // GenesisAlloc specifies the initial state that is part of the genesis block.
type GenesisAlloc map[common.Address]GenesisAccount type GenesisAlloc map[common.Address]GenesisAccount
@ -80,6 +115,96 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// deriveHash computes the state root according to the genesis specification.
func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
// Create an ephemeral in-memory database for computing the hash;
// all the derived states will be discarded so as not to pollute the disk.
db := state.NewDatabase(rawdb.NewMemoryDatabase())
statedb, err := state.New(common.Hash{}, db, nil)
if err != nil {
return common.Hash{}, err
}
for addr, account := range *ga {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
}
return statedb.Commit(false)
}
// flush is very similar to deriveHash, but the main difference is that
// all the generated states will be persisted into the given database.
// Also, the genesis state specification will be flushed as well.
func (ga *GenesisAlloc) flush(db ethdb.Database) error {
statedb, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
if err != nil {
return err
}
for addr, account := range *ga {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
}
root, err := statedb.Commit(false)
if err != nil {
return err
}
err = statedb.Database().TrieDB().Commit(root, true, nil)
if err != nil {
return err
}
// Marshal the genesis state specification and persist.
blob, err := json.Marshal(ga)
if err != nil {
return err
}
rawdb.WriteGenesisStateSpec(db, root, blob)
return nil
}
// CommitGenesisState loads the stored genesis state with the given block
// hash and commits them into the given database handler.
func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
var alloc GenesisAlloc
blob := rawdb.ReadGenesisStateSpec(db, hash)
if len(blob) != 0 {
if err := alloc.UnmarshalJSON(blob); err != nil {
return err
}
} else {
// Genesis allocation is missing and there are several possibilities:
// the node is a legacy one that doesn't persist the genesis allocation,
// or the persisted allocation was simply lost.
// - supported networks (mainnet, testnets): recover with the built-in allocations
// - private network: can't recover
var genesis *Genesis
switch hash {
case params.MainnetGenesisHash:
genesis = DefaultGenesisBlock()
case params.RopstenGenesisHash:
genesis = DefaultRopstenGenesisBlock()
case params.RinkebyGenesisHash:
genesis = DefaultRinkebyGenesisBlock()
case params.GoerliGenesisHash:
genesis = DefaultGoerliGenesisBlock()
case params.SepoliaGenesisHash:
genesis = DefaultSepoliaGenesisBlock()
}
if genesis != nil {
alloc = genesis.Alloc
} else {
return errors.New("not found")
}
}
return alloc.flush(db)
}
// GenesisAccount is an account in the state of the genesis block. // GenesisAccount is an account in the state of the genesis block.
type GenesisAccount struct { type GenesisAccount struct {
Code []byte `json:"code,omitempty"` Code []byte `json:"code,omitempty"`
@ -121,7 +246,6 @@ func (h *storageJSON) UnmarshalText(text []byte) error {
} }
offset := len(h) - len(text)/2 // pad on the left offset := len(h) - len(text)/2 // pad on the left
if _, err := hex.Decode(h[offset:], text); err != nil { if _, err := hex.Decode(h[offset:], text); err != nil {
fmt.Println(err)
return fmt.Errorf("invalid hex storage key/value %q", text) return fmt.Errorf("invalid hex storage key/value %q", text)
} }
return nil return nil
@ -141,6 +265,12 @@ func (e *GenesisMismatchError) Error() string {
return fmt.Sprintf("database contains incompatible genesis (have %x, new %x)", e.Stored, e.New) return fmt.Sprintf("database contains incompatible genesis (have %x, new %x)", e.Stored, e.New)
} }
// ChainOverrides contains the changes to chain config.
type ChainOverrides struct {
OverrideTerminalTotalDifficulty *big.Int
OverrideTerminalTotalDifficultyPassed *bool
}
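A hedged sketch of how a caller might thread these overrides through the updated SetupGenesisBlockWithOverride below; the helper name and the TTD value are illustrative, not taken from this diff.
// openWithMergeOverrides is illustrative only and not part of this diff.
func openWithMergeOverrides(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
	passed := true
	// Illustrative terminal total difficulty; substitute the network's real value.
	ttd, _ := new(big.Int).SetString("58750000000000000000000", 10)
	return SetupGenesisBlockWithOverride(db, nil, &ChainOverrides{
		OverrideTerminalTotalDifficulty:       ttd,
		OverrideTerminalTotalDifficultyPassed: &passed,
	})
}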
// SetupGenesisBlock writes or updates the genesis block in db. // SetupGenesisBlock writes or updates the genesis block in db.
// The block that will be used is: // The block that will be used is:
// //
@ -158,10 +288,22 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
return SetupGenesisBlockWithOverride(db, genesis, nil) return SetupGenesisBlockWithOverride(db, genesis, nil)
} }
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideLondon *big.Int) (*params.ChainConfig, common.Hash, error) { func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil { if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
} }
applyOverrides := func(config *params.ChainConfig) {
if config != nil {
if overrides != nil && overrides.OverrideTerminalTotalDifficulty != nil {
config.TerminalTotalDifficulty = overrides.OverrideTerminalTotalDifficulty
}
if overrides != nil && overrides.OverrideTerminalTotalDifficultyPassed != nil {
config.TerminalTotalDifficultyPassed = *overrides.OverrideTerminalTotalDifficultyPassed
}
}
}
// Just commit the new block if there is no stored genesis block. // Just commit the new block if there is no stored genesis block.
stored := rawdb.ReadCanonicalHash(db, 0) stored := rawdb.ReadCanonicalHash(db, 0)
if (stored == common.Hash{}) { if (stored == common.Hash{}) {
@ -175,6 +317,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if err != nil { if err != nil {
return genesis.Config, common.Hash{}, err return genesis.Config, common.Hash{}, err
} }
applyOverrides(genesis.Config)
return genesis.Config, block.Hash(), nil return genesis.Config, block.Hash(), nil
} }
// We have the genesis block in database(perhaps in ancient database) // We have the genesis block in database(perhaps in ancient database)
@ -185,7 +328,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
genesis = DefaultGenesisBlock() genesis = DefaultGenesisBlock()
} }
// Ensure the stored genesis matches with the given one. // Ensure the stored genesis matches with the given one.
hash := genesis.ToBlock(nil).Hash() hash := genesis.ToBlock().Hash()
if hash != stored { if hash != stored {
return genesis.Config, hash, &GenesisMismatchError{stored, hash} return genesis.Config, hash, &GenesisMismatchError{stored, hash}
} }
@ -193,20 +336,19 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if err != nil { if err != nil {
return genesis.Config, hash, err return genesis.Config, hash, err
} }
applyOverrides(genesis.Config)
return genesis.Config, block.Hash(), nil return genesis.Config, block.Hash(), nil
} }
// Check whether the genesis block is already written. // Check whether the genesis block is already written.
if genesis != nil { if genesis != nil {
hash := genesis.ToBlock(nil).Hash() hash := genesis.ToBlock().Hash()
if hash != stored { if hash != stored {
return genesis.Config, hash, &GenesisMismatchError{stored, hash} return genesis.Config, hash, &GenesisMismatchError{stored, hash}
} }
} }
// Get the existing chain configuration. // Get the existing chain configuration.
newcfg := genesis.configOrDefault(stored) newcfg := genesis.configOrDefault(stored)
if overrideLondon != nil { applyOverrides(newcfg)
newcfg.LondonBlock = overrideLondon
}
if err := newcfg.CheckConfigForkOrder(); err != nil { if err := newcfg.CheckConfigForkOrder(); err != nil {
return newcfg, common.Hash{}, err return newcfg, common.Hash{}, err
} }
@ -216,11 +358,15 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
rawdb.WriteChainConfig(db, stored, newcfg) rawdb.WriteChainConfig(db, stored, newcfg)
return newcfg, stored, nil return newcfg, stored, nil
} }
// Special case: don't change the existing config of a non-mainnet chain if no new storedData, _ := json.Marshal(storedcfg)
// config is supplied. These chains would get AllProtocolChanges (and a compat error) // Special case: if a private network is being used (no genesis and also no
// if we just continued here. // mainnet hash in the database), we must not apply the `configOrDefault`
// chain config as that would be AllProtocolChanges (applying any new fork
// on top of an existing private network genesis block). In that case, only
// apply the overrides.
if genesis == nil && stored != params.MainnetGenesisHash { if genesis == nil && stored != params.MainnetGenesisHash {
return storedcfg, stored, nil newcfg = storedcfg
applyOverrides(newcfg)
} }
// Check config compatibility and write the config. Compatibility errors // Check config compatibility and write the config. Compatibility errors
// are returned to the caller unless we're already at block zero. // are returned to the caller unless we're already at block zero.
@ -232,10 +378,49 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if compatErr != nil && *height != 0 && compatErr.RewindTo != 0 { if compatErr != nil && *height != 0 && compatErr.RewindTo != 0 {
return newcfg, stored, compatErr return newcfg, stored, compatErr
} }
// Don't overwrite if the old is identical to the new
if newData, _ := json.Marshal(newcfg); !bytes.Equal(storedData, newData) {
rawdb.WriteChainConfig(db, stored, newcfg) rawdb.WriteChainConfig(db, stored, newcfg)
}
return newcfg, stored, nil return newcfg, stored, nil
} }
// LoadCliqueConfig loads the stored clique config if the chain config
// is already present in the database; otherwise it returns the config in the
// provided genesis specification. Note the returned clique config can
// be nil if we are not in the clique network.
func LoadCliqueConfig(db ethdb.Database, genesis *Genesis) (*params.CliqueConfig, error) {
// Load the stored chain config from the database. It can be nil
// in case the database is empty. Notably, we only care about the
// chain config corresponding to the canonical chain.
stored := rawdb.ReadCanonicalHash(db, 0)
if stored != (common.Hash{}) {
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg != nil {
return storedcfg.Clique, nil
}
}
// Load the clique config from the provided genesis specification.
if genesis != nil {
// Reject invalid genesis spec without valid chain config
if genesis.Config == nil {
return nil, errGenesisNoConfig
}
// If the canonical genesis header is present, but the chain
// config is missing (e.g. the empty leveldb was initialized with an
// external ancient chain segment), ensure the provided genesis
// is matched.
if stored != (common.Hash{}) && genesis.ToBlock().Hash() != stored {
return nil, &GenesisMismatchError{stored, genesis.ToBlock().Hash()}
}
return genesis.Config.Clique, nil
}
// There is no stored chain config and no new config provided.
// In this case the default chain config (mainnet) will be used,
// with ethash as the consensus engine, so return nil.
return nil, nil
}
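A hedged sketch of the intended use of LoadCliqueConfig: pick a consensus engine before the chain itself is constructed. The engine constructors here are assumptions of the sketch, not part of this diff.
// chooseEngine is illustrative only; clique.New and ethash.NewFaker are
// assumed constructors from the consensus packages.
func chooseEngine(db ethdb.Database, genesis *Genesis) (consensus.Engine, error) {
	cliqueCfg, err := LoadCliqueConfig(db, genesis)
	if err != nil {
		return nil, err
	}
	if cliqueCfg != nil {
		return clique.New(cliqueCfg, db), nil // proof-of-authority network
	}
	return ethash.NewFaker(), nil // non-clique network; real callers pick their own engine
}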
func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
switch { switch {
case g != nil: case g != nil:
@ -244,36 +429,25 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
return params.MainnetChainConfig return params.MainnetChainConfig
case ghash == params.RopstenGenesisHash: case ghash == params.RopstenGenesisHash:
return params.RopstenChainConfig return params.RopstenChainConfig
case ghash == params.SepoliaGenesisHash:
return params.SepoliaChainConfig
case ghash == params.RinkebyGenesisHash: case ghash == params.RinkebyGenesisHash:
return params.RinkebyChainConfig return params.RinkebyChainConfig
case ghash == params.GoerliGenesisHash: case ghash == params.GoerliGenesisHash:
return params.GoerliChainConfig return params.GoerliChainConfig
case ghash == params.CalaverasGenesisHash: case ghash == params.KilnGenesisHash:
return params.CalaverasChainConfig return DefaultKilnGenesisBlock().Config
default: default:
return params.AllEthashProtocolChanges return params.AllEthashProtocolChanges
} }
} }
// ToBlock creates the genesis block and writes state of a genesis specification // ToBlock returns the genesis block according to genesis specification.
// to the given database (or discards it if nil). func (g *Genesis) ToBlock() *types.Block {
func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { root, err := g.Alloc.deriveHash()
if db == nil {
db = rawdb.NewMemoryDatabase()
}
statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }
for addr, account := range g.Alloc {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
}
root := statedb.IntermediateRoot(false)
head := &types.Header{ head := &types.Header{
Number: new(big.Int).SetUint64(g.Number), Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce), Nonce: types.EncodeNonce(g.Nonce),
@ -291,7 +465,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if g.GasLimit == 0 { if g.GasLimit == 0 {
head.GasLimit = params.GenesisGasLimit head.GasLimit = params.GenesisGasLimit
} }
if g.Difficulty == nil { if g.Difficulty == nil && g.Mixhash == (common.Hash{}) {
head.Difficulty = params.GenesisDifficulty head.Difficulty = params.GenesisDifficulty
} }
if g.Config != nil && g.Config.IsLondon(common.Big0) { if g.Config != nil && g.Config.IsLondon(common.Big0) {
@ -301,18 +475,15 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee) head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee)
} }
} }
statedb.Commit(false)
statedb.Database().TrieDB().Commit(root, true, nil)
return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)) return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
} }
// Commit writes the block and state of a genesis specification to the database. // Commit writes the block and state of a genesis specification to the database.
// The block is committed as the canonical head block. // The block is committed as the canonical head block.
func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
block := g.ToBlock(db) block := g.ToBlock()
if block.Number().Sign() != 0 { if block.Number().Sign() != 0 {
return nil, fmt.Errorf("can't commit genesis block with number > 0") return nil, errors.New("can't commit genesis block with number > 0")
} }
config := g.Config config := g.Config
if config == nil { if config == nil {
@ -321,7 +492,16 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if err := config.CheckConfigForkOrder(); err != nil { if err := config.CheckConfigForkOrder(); err != nil {
return nil, err return nil, err
} }
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty) if config.Clique != nil && len(block.Extra()) < 32+crypto.SignatureLength {
return nil, errors.New("can't start clique chain without signers")
}
// All the checks have passed, flush the states derived from the genesis
// specification as well as the specification itself into the provided
// database.
if err := g.Alloc.flush(db); err != nil {
return nil, err
}
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
rawdb.WriteBlock(db, block) rawdb.WriteBlock(db, block)
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil) rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
@ -342,15 +522,6 @@ func (g *Genesis) MustCommit(db ethdb.Database) *types.Block {
return block return block
} }
// GenesisBlockForTesting creates and writes a block in which addr has the given wei balance.
func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int) *types.Block {
g := Genesis{
Alloc: GenesisAlloc{addr: {Balance: balance}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
return g.MustCommit(db)
}
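Since GenesisBlockForTesting is removed here, callers that depended on it can inline the equivalent; a minimal sketch (the lowercase name is illustrative):
// genesisBlockForTesting mirrors the removed helper above.
func genesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int) *types.Block {
	g := Genesis{
		Alloc:   GenesisAlloc{addr: {Balance: balance}},
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	return g.MustCommit(db)
}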
// DefaultGenesisBlock returns the Ethereum main net genesis block. // DefaultGenesisBlock returns the Ethereum main net genesis block.
func DefaultGenesisBlock() *Genesis { func DefaultGenesisBlock() *Genesis {
return &Genesis{ return &Genesis{
@ -399,20 +570,31 @@ func DefaultGoerliGenesisBlock() *Genesis {
} }
} }
func DefaultCalaverasGenesisBlock() *Genesis { // DefaultSepoliaGenesisBlock returns the Sepolia network genesis block.
// Full genesis: https://gist.github.com/holiman/c6ed9269dce28304ad176314caa75e97 func DefaultSepoliaGenesisBlock() *Genesis {
return &Genesis{ return &Genesis{
Config: params.CalaverasChainConfig, Config: params.SepoliaChainConfig,
Timestamp: 0x60b3877f, Nonce: 0,
ExtraData: hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000005211cea3870c7ba7c6c44b185e62eecdb864cd8c560228ce57d31efbf64c200b2c200aacec78cf17a7148e784fe95a7a750335f8b9572ee28d72e7650000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), ExtraData: []byte("Sepolia, Athens, Attica, Greece!"),
GasLimit: 0x47b760, GasLimit: 0x1c9c380,
Difficulty: big.NewInt(1), Difficulty: big.NewInt(0x20000),
Alloc: decodePrealloc(calaverasAllocData), Timestamp: 1633267481,
Alloc: decodePrealloc(sepoliaAllocData),
} }
} }
// DefaultKilnGenesisBlock returns the kiln network genesis block.
func DefaultKilnGenesisBlock() *Genesis {
g := new(Genesis)
reader := strings.NewReader(KilnAllocData)
if err := json.NewDecoder(reader).Decode(g); err != nil {
panic(err)
}
return g
}
// DeveloperGenesisBlock returns the 'geth --dev' genesis block. // DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis { func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one // Override the default period to the user requested one
config := *params.AllCliqueProtocolChanges config := *params.AllCliqueProtocolChanges
config.Clique = &params.CliqueConfig{ config.Clique = &params.CliqueConfig{
@ -424,7 +606,7 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
return &Genesis{ return &Genesis{
Config: &config, Config: &config,
ExtraData: append(append(make([]byte, 32), faucet[:]...), make([]byte, crypto.SignatureLength)...), ExtraData: append(append(make([]byte, 32), faucet[:]...), make([]byte, crypto.SignatureLength)...),
GasLimit: 11500000, GasLimit: gasLimit,
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
Difficulty: big.NewInt(1), Difficulty: big.NewInt(1),
Alloc: map[common.Address]GenesisAccount{ Alloc: map[common.Address]GenesisAccount{

File diff suppressed because one or more lines are too long

View File

@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
lru "github.com/hashicorp/golang-lru" lru "github.com/hashicorp/golang-lru"
) )
@ -49,7 +50,7 @@ const (
// HeaderChain is responsible for maintaining the header chain including the // HeaderChain is responsible for maintaining the header chain including the
// header query and updating. // header query and updating.
// //
// The components maintained by headerchain includes: (1) total difficult // The components maintained by headerchain includes: (1) total difficulty
// (2) header (3) block hash -> number mapping (4) canonical number -> hash mapping // (2) header (3) block hash -> number mapping (4) canonical number -> hash mapping
// and (5) head header flag. // and (5) head header flag.
// //
@ -57,7 +58,6 @@ const (
// the necessary mutex locking/unlocking. // the necessary mutex locking/unlocking.
type HeaderChain struct { type HeaderChain struct {
config *params.ChainConfig config *params.ChainConfig
chainDb ethdb.Database chainDb ethdb.Database
genesisHeader *types.Header genesisHeader *types.Header
@ -86,7 +86,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
if err != nil { if err != nil {
return nil, err return nil, err
} }
hc := &HeaderChain{ hc := &HeaderChain{
config: config, config: config,
chainDb: chainDb, chainDb: chainDb,
@ -97,12 +96,10 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
rand: mrand.New(mrand.NewSource(seed.Int64())), rand: mrand.New(mrand.NewSource(seed.Int64())),
engine: engine, engine: engine,
} }
hc.genesisHeader = hc.GetHeaderByNumber(0) hc.genesisHeader = hc.GetHeaderByNumber(0)
if hc.genesisHeader == nil { if hc.genesisHeader == nil {
return nil, ErrNoGenesis return nil, ErrNoGenesis
} }
hc.currentHeader.Store(hc.genesisHeader) hc.currentHeader.Store(hc.genesisHeader)
if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) { if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
if chead := hc.GetHeaderByHash(head); chead != nil { if chead := hc.GetHeaderByHash(head); chead != nil {
@ -111,7 +108,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
} }
hc.currentHeaderHash = hc.CurrentHeader().Hash() hc.currentHeaderHash = hc.CurrentHeader().Hash()
headHeaderGauge.Update(hc.CurrentHeader().Number.Int64()) headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())
return hc, nil return hc, nil
} }
@ -137,35 +133,93 @@ type headerWriteResult struct {
lastHeader *types.Header lastHeader *types.Header
} }
// WriteHeaders writes a chain of headers into the local chain, given that the parents // Reorg reorgs the local canonical chain into the specified chain. The reorg
// are already known. If the total difficulty of the newly inserted chain becomes // can be classified into two cases: (a) extend the local chain (b) switch the
// greater than the current known TD, the canonical chain is reorged. // head to the given header.
// func (hc *HeaderChain) Reorg(headers []*types.Header) error {
// Note: This method is not concurrent-safe with inserting blocks simultaneously // Short circuit if nothing to reorg.
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWriteResult, err error) {
if len(headers) == 0 { if len(headers) == 0 {
return &headerWriteResult{}, nil return nil
}
// If the parent of the (first) block is already the canon header,
// we don't have to go backwards to delete canon blocks, but simply
// pile them onto the existing chain. Otherwise, do the necessary
// reorgs.
var (
first = headers[0]
last = headers[len(headers)-1]
batch = hc.chainDb.NewBatch()
)
if first.ParentHash != hc.currentHeaderHash {
// Delete any canonical number assignments above the new head
for i := last.Number.Uint64() + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
if hash == (common.Hash{}) {
break
}
rawdb.DeleteCanonicalHash(batch, i)
}
// Overwrite any stale canonical number assignments, going
// backwards from the first header in this import until the
// cross link between two chains.
var (
header = first
headNumber = header.Number.Uint64()
headHash = header.Hash()
)
for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
rawdb.WriteCanonicalHash(batch, headHash, headNumber)
if headNumber == 0 {
break // It shouldn't be reached
}
headHash, headNumber = header.ParentHash, header.Number.Uint64()-1
header = hc.GetHeader(headHash, headNumber)
if header == nil {
return fmt.Errorf("missing parent %d %x", headNumber, headHash)
}
}
}
// Extend the canonical chain with the new headers
for i := 0; i < len(headers)-1; i++ {
hash := headers[i+1].ParentHash // Save some extra hashing
num := headers[i].Number.Uint64()
rawdb.WriteCanonicalHash(batch, hash, num)
rawdb.WriteHeadHeaderHash(batch, hash)
}
// Write the last header
hash := headers[len(headers)-1].Hash()
num := headers[len(headers)-1].Number.Uint64()
rawdb.WriteCanonicalHash(batch, hash, num)
rawdb.WriteHeadHeaderHash(batch, hash)
if err := batch.Write(); err != nil {
return err
}
// Last step update all in-memory head header markers
hc.currentHeaderHash = last.Hash()
hc.currentHeader.Store(types.CopyHeader(last))
headHeaderGauge.Update(last.Number.Int64())
return nil
}
// WriteHeaders writes a chain of headers into the local chain, given that the
// parents are already known. The chain head header won't be updated in this
// function, the additional SetCanonical is expected in order to finish the entire
// procedure.
func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
if len(headers) == 0 {
return 0, nil
} }
ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1) ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
if ptd == nil { if ptd == nil {
return &headerWriteResult{}, consensus.ErrUnknownAncestor return 0, consensus.ErrUnknownAncestor
} }
var ( var (
lastNumber = headers[0].Number.Uint64() - 1 // Last successfully imported number
lastHash = headers[0].ParentHash // Last imported header hash
newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain
inserted []rawdb.NumberHash // Ephemeral lookup of number/hash for the chain
lastHeader *types.Header parentKnown = true // Set to true to force hc.HasHeader check the first iteration
inserted []numberHash // Ephemeral lookup of number/hash for the chain batch = hc.chainDb.NewBatch()
firstInserted = -1 // Index of the first non-ignored header
) )
batch := hc.chainDb.NewBatch()
parentKnown := true // Set to true to force hc.HasHeader check the first iteration
for i, header := range headers { for i, header := range headers {
var hash common.Hash var hash common.Hash
// The headers have already been validated at this point, so we already // The headers have already been validated at this point, so we already
@ -188,116 +242,67 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
hc.tdCache.Add(hash, new(big.Int).Set(newTD)) hc.tdCache.Add(hash, new(big.Int).Set(newTD))
rawdb.WriteHeader(batch, header) rawdb.WriteHeader(batch, header)
inserted = append(inserted, numberHash{number, hash}) inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash})
hc.headerCache.Add(hash, header) hc.headerCache.Add(hash, header)
hc.numberCache.Add(hash, number) hc.numberCache.Add(hash, number)
if firstInserted < 0 {
firstInserted = i
}
} }
parentKnown = alreadyKnown parentKnown = alreadyKnown
lastHeader, lastHash, lastNumber = header, hash, number
} }
// Skip the slow disk write of all headers if interrupted. // Skip the slow disk write of all headers if interrupted.
if hc.procInterrupt() { if hc.procInterrupt() {
log.Debug("Premature abort during headers import") log.Debug("Premature abort during headers import")
return &headerWriteResult{}, errors.New("aborted") return 0, errors.New("aborted")
} }
// Commit to disk! // Commit to disk!
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
log.Crit("Failed to write headers", "error", err) log.Crit("Failed to write headers", "error", err)
} }
batch.Reset() return len(inserted), nil
}
// writeHeadersAndSetHead writes a batch of block headers and applies the last
// header as the chain head if the fork choicer says it's ok to update the chain.
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) {
inserted, err := hc.WriteHeaders(headers)
if err != nil {
return nil, err
}
var ( var (
head = hc.CurrentHeader().Number.Uint64() lastHeader = headers[len(headers)-1]
localTD = hc.GetTd(hc.currentHeaderHash, head) lastHash = headers[len(headers)-1].Hash()
status = SideStatTy result = &headerWriteResult{
) status: NonStatTy,
// If the total difficulty is higher than our known, add it to the canonical chain ignored: len(headers) - inserted,
// Second clause in the if statement reduces the vulnerability to selfish mining. imported: inserted,
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
reorg := newTD.Cmp(localTD) > 0
if !reorg && newTD.Cmp(localTD) == 0 {
if lastNumber < head {
reorg = true
} else if lastNumber == head {
reorg = mrand.Float64() < 0.5
}
}
// If the parent of the (first) block is already the canon header,
// we don't have to go backwards to delete canon blocks, but
// simply pile them onto the existing chain
chainAlreadyCanon := headers[0].ParentHash == hc.currentHeaderHash
if reorg {
// If the header can be added into canonical chain, adjust the
// header chain markers(canonical indexes and head header flag).
//
// Note all markers should be written atomically.
markerBatch := batch // we can reuse the batch to keep allocs down
if !chainAlreadyCanon {
// Delete any canonical number assignments above the new head
for i := lastNumber + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
if hash == (common.Hash{}) {
break
}
rawdb.DeleteCanonicalHash(markerBatch, i)
}
// Overwrite any stale canonical number assignments, going
// backwards from the first header in this import
var (
headHash = headers[0].ParentHash // inserted[0].parent?
headNumber = headers[0].Number.Uint64() - 1 // inserted[0].num-1 ?
headHeader = hc.GetHeader(headHash, headNumber)
)
for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
headHash = headHeader.ParentHash
headNumber = headHeader.Number.Uint64() - 1
headHeader = hc.GetHeader(headHash, headNumber)
}
// If some of the older headers were already known, but obtained canon-status
// during this import batch, then we need to write that now
// Further down, we continue writing the status for the ones that
// were not already known
for i := 0; i < firstInserted; i++ {
hash := headers[i].Hash()
num := headers[i].Number.Uint64()
rawdb.WriteCanonicalHash(markerBatch, hash, num)
rawdb.WriteHeadHeaderHash(markerBatch, hash)
}
}
// Extend the canonical chain with the new headers
for _, hn := range inserted {
rawdb.WriteCanonicalHash(markerBatch, hn.hash, hn.number)
rawdb.WriteHeadHeaderHash(markerBatch, hn.hash)
}
if err := markerBatch.Write(); err != nil {
log.Crit("Failed to write header markers into disk", "err", err)
}
markerBatch.Reset()
// Last step update all in-memory head header markers
hc.currentHeaderHash = lastHash
hc.currentHeader.Store(types.CopyHeader(lastHeader))
headHeaderGauge.Update(lastHeader.Number.Int64())
// Chain status is canonical since this insert was a reorg.
// Note that all inserts which have higher TD than existing are 'reorg'.
status = CanonStatTy
}
if len(inserted) == 0 {
status = NonStatTy
}
return &headerWriteResult{
status: status,
ignored: len(headers) - len(inserted),
imported: len(inserted),
lastHash: lastHash, lastHash: lastHash,
lastHeader: lastHeader, lastHeader: lastHeader,
}, nil }
)
// Ask the fork choicer if the reorg is necessary
if reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader); err != nil {
return nil, err
} else if !reorg {
if inserted != 0 {
result.status = SideStatTy
}
return result, nil
}
// Special case, all the inserted headers are already on the canonical
// header chain, skip the reorg operation.
if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
return result, nil
}
// Apply the reorg operation
if err := hc.Reorg(headers); err != nil {
return nil, err
}
result.status = CanonStatTy
return result, nil
} }
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) { func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
@ -315,11 +320,11 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
} }
// If the header is a banned one, straight out abort // If the header is a banned one, straight out abort
if BadHashes[chain[i].ParentHash] { if BadHashes[chain[i].ParentHash] {
return i - 1, ErrBlacklistedHash return i - 1, ErrBannedHash
} }
// If it's the last header in the chunk, we need to check it too if i == len(chain)-1 && BadHashes[chain[i].Hash()] { if i == len(chain)-1 && BadHashes[chain[i].Hash()] {
if i == len(chain)-1 && BadHashes[chain[i].Hash()] { if i == len(chain)-1 && BadHashes[chain[i].Hash()] {
return i, ErrBlacklistedHash return i, ErrBannedHash
} }
} }
@ -357,7 +362,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
return 0, nil return 0, nil
} }
// InsertHeaderChain inserts the given headers. // InsertHeaderChain inserts the given headers and does the reorganisations.
// //
// The validity of the headers is NOT CHECKED by this method, i.e. they need to be // The validity of the headers is NOT CHECKED by this method, i.e. they need to be
// validated by ValidateHeaderChain before calling InsertHeaderChain. // validated by ValidateHeaderChain before calling InsertHeaderChain.
@ -367,20 +372,19 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
// //
// The returned 'write status' says if the inserted headers are part of the canonical chain // The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain. // or a side chain.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) { func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) {
if hc.procInterrupt() { if hc.procInterrupt() {
return 0, errors.New("aborted") return 0, errors.New("aborted")
} }
res, err := hc.writeHeaders(chain) res, err := hc.writeHeadersAndSetHead(chain, forker)
if err != nil {
return 0, err
}
// Report some public statistics so the user has a clue what's going on // Report some public statistics so the user has a clue what's going on
context := []interface{}{ context := []interface{}{
"count", res.imported, "count", res.imported,
"elapsed", common.PrettyDuration(time.Since(start)), "elapsed", common.PrettyDuration(time.Since(start)),
} }
if err != nil {
context = append(context, "err", err)
}
if last := res.lastHeader; last != nil { if last := res.lastHeader; last != nil {
context = append(context, "number", last.Number, "hash", res.lastHash) context = append(context, "number", last.Number, "hash", res.lastHash)
if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute { if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
@ -394,29 +398,6 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time)
return res.status, err return res.status, err
} }
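A hedged sketch of the new call shape: InsertHeaderChain now takes a ForkChoice so the forker, not the raw total difficulty, decides whether the batch becomes canonical. How the forker is constructed is outside this excerpt.
// insertAndReport is illustrative only and not part of this diff.
func insertAndReport(hc *HeaderChain, forker *ForkChoice, headers []*types.Header) error {
	// Headers are assumed to have passed ValidateHeaderChain already.
	status, err := hc.InsertHeaderChain(headers, time.Now(), forker)
	if err != nil {
		return err
	}
	if status == CanonStatTy {
		log.Info("Headers extended the canonical chain", "head", hc.CurrentHeader().Number)
	}
	return nil
}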
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
// Get the origin header from which to fetch
header := hc.GetHeaderByHash(hash)
if header == nil {
return nil
}
// Iterate the headers until enough is collected or the genesis reached
chain := make([]common.Hash, 0, max)
for i := uint64(0); i < max; i++ {
next := header.ParentHash
if header = hc.GetHeader(next, header.Number.Uint64()-1); header == nil {
break
}
chain = append(chain, next)
if header.Number.Sign() == 0 {
break
}
}
return chain
}
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain. // number of blocks to be individually checked before we reach the canonical chain.
@ -472,16 +453,6 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
return td return td
} }
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
number := hc.GetBlockNumber(hash)
if number == nil {
return nil
}
return hc.GetTd(hash, *number)
}
// GetHeader retrieves a block header from the database by hash and number, // GetHeader retrieves a block header from the database by hash and number,
// caching it if found. // caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
@ -528,6 +499,46 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
return hc.GetHeader(hash, number) return hc.GetHeader(hash, number)
} }
// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
// backwards from the given number.
// If the 'number' is higher than the highest local header, this method will
// return a best-effort response, containing the headers that we do have.
func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
// If the request is for future headers, we still return the portion of
// headers that we are able to serve
if current := hc.CurrentHeader().Number.Uint64(); current < number {
if count > number-current {
count -= number - current
number = current
} else {
return nil
}
}
var headers []rlp.RawValue
// If we have some of the headers in cache already, use that before going to db.
hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
if hash == (common.Hash{}) {
return nil
}
for count > 0 {
header, ok := hc.headerCache.Get(hash)
if !ok {
break
}
h := header.(*types.Header)
rlpData, _ := rlp.EncodeToBytes(h)
headers = append(headers, rlpData)
hash = h.ParentHash
count--
number--
}
// Read remaining from db
if count > 0 {
headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...)
}
return headers
}
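GetHeadersFrom hands back raw RLP so it can be shipped over the wire without re-encoding; a hedged sketch of decoding it back into header structs when needed (the wrapper name is illustrative):
// decodeHeadersFrom is illustrative only and not part of this diff.
func decodeHeadersFrom(hc *HeaderChain, number, count uint64) ([]*types.Header, error) {
	headers := make([]*types.Header, 0, count)
	for _, blob := range hc.GetHeadersFrom(number, count) {
		h := new(types.Header)
		if err := rlp.DecodeBytes(blob, h); err != nil {
			return nil, err
		}
		headers = append(headers, h)
	}
	return headers, nil
}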
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash { func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
return rawdb.ReadCanonicalHash(hc.chainDb, number) return rawdb.ReadCanonicalHash(hc.chainDb, number)
} }

View File

@ -14,15 +14,14 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build none
// +build none // +build none
/* /*
The mkalloc tool creates the genesis allocation constants in genesis_alloc.go The mkalloc tool creates the genesis allocation constants in genesis_alloc.go
It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples. It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples.
go run mkalloc.go genesis.json go run mkalloc.go genesis.json
*/ */
package main package main

Some files were not shown because too many files have changed in this diff