Update status-go, whisper and ethereum to our newest versions (#49)

Dmitry Shulyak 2019-05-27 14:22:18 +03:00 committed by GitHub
parent bae0b78fa9
commit 86f1b48bcd
293 changed files with 44720 additions and 13771 deletions

go.mod

@ -8,7 +8,7 @@ require (
github.com/deckarep/golang-set v1.7.1 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/elastic/gosigar v0.10.2 // indirect
github.com/ethereum/go-ethereum v1.8.20
github.com/ethereum/go-ethereum v1.8.23
github.com/fatih/color v1.7.0
github.com/fjl/memsize v0.0.0-20180929194037-2a09253e352a // indirect
github.com/go-playground/locales v0.12.1 // indirect
@ -17,7 +17,6 @@ require (
github.com/golang/mock v1.3.1
github.com/influxdata/influxdb v1.7.6 // indirect
github.com/jroimartin/gocui v0.4.0
github.com/jteeuwen/go-bindata v3.0.7+incompatible // indirect
github.com/karalabe/hid v0.0.0-20181128192157-d815e0c1a2e2 // indirect
github.com/leodido/go-urn v1.1.0 // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect
@ -37,15 +36,14 @@ require (
github.com/status-im/doubleratchet v2.0.0+incompatible // indirect
github.com/status-im/migrate v0.0.0-20181227113212-3bde36291b5c
github.com/status-im/rendezvous v1.2.0 // indirect
github.com/status-im/status-go v0.23.0-beta.9
github.com/status-im/whisper v1.4.8
github.com/status-im/status-go v0.25.0-beta.2
github.com/status-im/whisper v1.4.13
github.com/stretchr/objx v0.1.1 // indirect
github.com/stretchr/testify v1.3.0
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f
golang.org/x/net v0.0.0-20190514140710-3ec191127204 // indirect
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f // indirect
golang.org/x/text v0.3.2 // indirect
golang.org/x/tools v0.0.0-20190515035509-2196cb7019cc // indirect
gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
gopkg.in/go-playground/validator.v9 v9.24.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
@ -54,4 +52,4 @@ require (
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
)
replace github.com/ethereum/go-ethereum v1.8.20 => github.com/status-im/go-ethereum v1.8.20
replace github.com/ethereum/go-ethereum v1.8.23 => github.com/status-im/go-ethereum v1.8.23

go.sum

@ -3,7 +3,9 @@ github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOv
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ=
github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Sirupsen/logrus v1.0.6 h1:HCAGQRk48dRVPA5Y+Yh0qdCSTzPOyU1tBJ7Q9YzotII=
github.com/Sirupsen/logrus v1.0.6/go.mod h1:rmk17hk6i8ZSAJkSDa7nOxamrG+SP4P0mm+DAvExv4U=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/allegro/bigcache v1.1.0 h1:MLuIKTjdxDc+qsG2rhjsYjsHQC5LUGjIWzutg7M+W68=
@ -51,10 +53,15 @@ github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9r
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/docker/distribution v0.0.0-20180720172123-0dae0957e5fe h1:ZRQNMB7Sw5jf9g/0imDbI+vTFNk4J7qBdtFI5/zf1kg=
github.com/docker/distribution v0.0.0-20180720172123-0dae0957e5fe/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.0.0-20170502054910-90d35abf7b35 h1:ly3dRUfvdP5i/t9iqVHd2VQQIDtO3tpfFWPah7g4CFw=
github.com/docker/docker v0.0.0-20170502054910-90d35abf7b35/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
@ -62,6 +69,7 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/gosigar v0.10.2 h1:ktO7B8lAQQCf/CieNp1lQzKnbijg3FbfQqFUfAe6h/A=
github.com/elastic/gosigar v0.10.2/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
github.com/ethereum/go-ethereum v1.8.20/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20180929194037-2a09253e352a h1:1znxn4+q2MrEdTk1eCk6KIV3muTYVclBIB6CTVR/zBc=
@ -85,8 +93,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/golang-migrate/migrate v3.5.4+incompatible h1:R7OzwvCJTCgwapPCiX6DyBiu2czIUMDCB118gFTKTUA=
github.com/golang-migrate/migrate v3.5.4+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -155,8 +161,6 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jroimartin/gocui v0.4.0 h1:52jnalstgmc25FmtGcWqa0tcbMEWS6RpFLsOIO+I+E8=
github.com/jroimartin/gocui v0.4.0/go.mod h1:7i7bbj99OgFHzo7kB2zPb8pXLqMBSQegY7azfqXMkyY=
github.com/jteeuwen/go-bindata v3.0.7+incompatible h1:91Uy4d9SYVr1kyTJ15wJsog+esAZZl7JmEfTkwmhJts=
github.com/jteeuwen/go-bindata v3.0.7+incompatible/go.mod h1:JVvhzYOiGBnFSYRyV00iY8q7/0PThjIYav1p9h5dmKs=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/karalabe/hid v0.0.0-20181128192157-d815e0c1a2e2 h1:BkkpZxPVs3gIf+3Tejt8lWzuo2P29N1ChGUMEpuSJ8U=
github.com/karalabe/hid v0.0.0-20181128192157-d815e0c1a2e2/go.mod h1:YvbcH+3Wo6XPs9nkgTY3u19KXLauXW+J5nB7hEHuX0A=
@ -173,6 +177,7 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kshvakov/clickhouse v1.3.3/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/lib/pq v0.0.0-20180523175426-90697d60dd84 h1:it29sI2IM490luSc3RAhp5WuCYnc6RtbfLVAB7nmC5M=
github.com/lib/pq v0.0.0-20180523175426-90697d60dd84/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libp2p/go-addr-util v0.0.1 h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88=
github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=
@ -314,6 +319,7 @@ github.com/onsi/gomega v1.4.1 h1:PZSj/UFNaVp3KxrzHOcS7oyuWA7LoOY/77yCTEFu21U=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@ -345,6 +351,7 @@ github.com/russolsen/transit v0.0.0-20180705123435-0794b4c4505a h1:yVNJFSzkEG8sm
github.com/russolsen/transit v0.0.0-20180705123435-0794b4c4505a/go.mod h1:TPq+fcJOdGrkpZpXF4UVmFjYxH0gGqnxdgZ+OzAmvJk=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
@ -357,18 +364,18 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/status-im/doubleratchet v2.0.0+incompatible h1:s77lF1lDubK0RKftxN2vH8G9gwtVVp13ggWfyY4O1q4=
github.com/status-im/doubleratchet v2.0.0+incompatible/go.mod h1:1sqR0+yhiM/bd+wrdX79AOt2csZuJOni0nUDzKNuqOU=
github.com/status-im/go-ethereum v1.8.20 h1:t5kpBwjgoDnfdHjPf1AnRE5R8KbtrCJqB6wWYJchqas=
github.com/status-im/go-ethereum v1.8.20/go.mod h1:rdSZTVW2L1V6dsRpW77o4gwqWRUYfb/e9nNHcIgVEO4=
github.com/status-im/go-ethereum v1.8.23 h1:HEAa0xhZ/Zoz/dNl+neOilNHQJzgYP+gk54hUBCrcb0=
github.com/status-im/go-ethereum v1.8.23/go.mod h1:rdSZTVW2L1V6dsRpW77o4gwqWRUYfb/e9nNHcIgVEO4=
github.com/status-im/go-multiaddr-ethv4 v1.1.0 h1:7+BdGNaqb66f/dO7mx/12LjaT+6wr4G8l4wvkONfctg=
github.com/status-im/go-multiaddr-ethv4 v1.1.0/go.mod h1:H/WC6o3//elWzBWBe/UhvLl4tqtseVLzO5SFNBtT5SU=
github.com/status-im/migrate v0.0.0-20181227113212-3bde36291b5c h1:GGnVxyL55FngDgRF+vOzWt/er/rXBctyG0iyH/n+anA=
github.com/status-im/migrate v0.0.0-20181227113212-3bde36291b5c/go.mod h1:dZ3YxYs9/ik9SOXvNX3Yjl8kcaDbN5b4vZH5efdK+bk=
github.com/status-im/rendezvous v1.2.0 h1:d3HysveJI9w3QFFIGf0rz1HHW51LMb0F0sgGDF+Qq+c=
github.com/status-im/rendezvous v1.2.0/go.mod h1:vUZNaCYr2AsFngV2IW5NsnPBRK/h+x57nsMINkhrVCQ=
github.com/status-im/status-go v0.23.0-beta.9 h1:ZeBtlWgVQuI4qG/u5ffsziMxmgL9iIXi+ou5HjaVbEs=
github.com/status-im/status-go v0.23.0-beta.9/go.mod h1:3ky7GKNvGXC46HmjcVnDe+3cCkzP+JTzPRz4d50AQ4w=
github.com/status-im/whisper v1.4.8 h1:YA7FS9F7SFYn/SvrQta0gAu206/HXX8JpwCmZImxn6E=
github.com/status-im/whisper v1.4.8/go.mod h1:WS6z39YJQ8WJa9s+DmTuEM/s2nVF6Iz3B1SZYw5cYf0=
github.com/status-im/status-go v0.25.0-beta.2 h1:7o0CaezNhdHtxxeBliY7EoboDyaatRcsVtoKkTgr2gs=
github.com/status-im/status-go v0.25.0-beta.2/go.mod h1:3ky7GKNvGXC46HmjcVnDe+3cCkzP+JTzPRz4d50AQ4w=
github.com/status-im/whisper v1.4.13 h1:V8aC66CkpTYFS/l9lSp5NW2xHOlpRnGZxKdJmDVLFxE=
github.com/status-im/whisper v1.4.13/go.mod h1:WS6z39YJQ8WJa9s+DmTuEM/s2nVF6Iz3B1SZYw5cYf0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -417,15 +424,11 @@ golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190419010253-1f3472d942ba h1:h0zCzEL5UW1mERvwTN6AXcc75PpLkY6OcReia6Dq1BM=
golang.org/x/net v0.0.0-20190419010253-1f3472d942ba/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190514140710-3ec191127204 h1:4yG6GqBtw9C+UrLp6s2wtSniayy/Vd/3F7ffLE427XI=
golang.org/x/net v0.0.0-20190514140710-3ec191127204/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180724212812-e072cadbbdc8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -436,8 +439,6 @@ golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be h1:mI+jhqkn68ybP0ORJqunXn+fq+Eeb4hHKqLQcFICjAc=
golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f h1:Xab8gg26GrI/x3RNdVhVkHHM1XLyGeRBEvz4Q5x4YW8=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
@ -448,19 +449,19 @@ golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20180725002046-526516e9c4bf/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190515035509-2196cb7019cc h1:tbVkv02hKlra+M/i/CAnW0EBJbCikREdSVzxka4vio8=
golang.org/x/tools v0.0.0-20190515035509-2196cb7019cc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/api v0.0.0-20180724000608-2c45710c7f3f/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180722052100-02b4e9547331/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=


@ -396,6 +396,7 @@ func newFilter(keys keysManager) *filter {
Filter: &whisper.Filter{
PoW: 0,
AllowP2P: true,
Messages: whisper.NewMemoryMessageStore(),
},
keys: keys,
}
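
The new Messages field reflects an API change in status-im/whisper v1.4.13: each filter now owns an explicit message store instead of relying on implicit storage in the Whisper host. A minimal sketch of registering such a filter, assuming the whisperv6 import path and an already-added asymmetric key (the surrounding wiring is illustrative, not from this commit):

import (
	whisper "github.com/status-im/whisper/whisperv6"
)

func registerFilter(w *whisper.Whisper, keyID string) (string, error) {
	key, err := w.GetPrivateKey(keyID) // asymmetric key previously added to w
	if err != nil {
		return "", err
	}
	return w.Subscribe(&whisper.Filter{
		KeyAsym:  key,
		PoW:      0,
		AllowP2P: true,
		// Required since v1.4.13: the filter buffers matched envelopes here.
		Messages: whisper.NewMemoryMessageStore(),
	})
}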


@ -68,8 +68,11 @@ matrix:
- debhelper
- dput
- fakeroot
- python-bzrlib
- python-paramiko
script:
- go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
- echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
- go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
# This builder does the Linux Azure uploads
- if: type = push
@ -156,7 +159,7 @@ matrix:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- curl https://storage.googleapis.com/golang/go1.11.2.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.11.5.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go


@ -58,13 +58,11 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return nil, err
}
return arguments, nil
}
method, exist := abi.Methods[name]
if !exist {
return nil, fmt.Errorf("method '%s' not found", name)
}
arguments, err := method.Inputs.Pack(args...)
if err != nil {
return nil, err
@ -82,7 +80,7 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
// we need to decide whether we're calling a method or an event
if method, ok := abi.Methods[name]; ok {
if len(output)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output")
return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(output), output)
}
return method.Outputs.Unpack(v, output)
} else if event, ok := abi.Events[name]; ok {
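
The expanded error now echoes the offending payload, which makes truncated or mis-encoded call results far easier to diagnose. A small sketch of the failure path, using an illustrative single-method ABI (the JSON definition and byte length below are examples, not part of this commit):

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	const def = `[{"type":"function","name":"get","outputs":[{"name":"v","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	var v *big.Int
	// 31 bytes is not a multiple of 32, so Unpack fails; the error now
	// includes the raw bytes instead of just "improperly formatted output".
	err = parsed.Unpack(&v, "get", make([]byte, 31))
	fmt.Println(err)
}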


@ -33,24 +33,27 @@ type Argument struct {
type Arguments []Argument
type ArgumentMarshaling struct {
Name string
Type string
Components []ArgumentMarshaling
Indexed bool
}
// UnmarshalJSON implements json.Unmarshaler interface
func (argument *Argument) UnmarshalJSON(data []byte) error {
var extarg struct {
Name string
Type string
Indexed bool
}
err := json.Unmarshal(data, &extarg)
var arg ArgumentMarshaling
err := json.Unmarshal(data, &arg)
if err != nil {
return fmt.Errorf("argument json err: %v", err)
}
argument.Type, err = NewType(extarg.Type)
argument.Type, err = NewType(arg.Type, arg.Components)
if err != nil {
return err
}
argument.Name = extarg.Name
argument.Indexed = extarg.Indexed
argument.Name = arg.Name
argument.Indexed = arg.Indexed
return nil
}
@ -85,7 +88,6 @@ func (arguments Arguments) isTuple() bool {
// Unpack performs the operation hexdata -> Go format
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
@ -97,52 +99,134 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
return arguments.unpackAtomic(v, marshalledValues)
return arguments.unpackAtomic(v, marshalledValues[0])
}
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
// unpack sets the unmarshalled value into dst in Go format.
// Note the dst here must be settable.
func unpack(t *Type, dst interface{}, src interface{}) error {
var (
dstVal = reflect.ValueOf(dst).Elem()
srcVal = reflect.ValueOf(src)
)
if t.T != TupleTy && !((t.T == SliceTy || t.T == ArrayTy) && t.Elem.T == TupleTy) {
return set(dstVal, srcVal)
}
switch t.T {
case TupleTy:
if dstVal.Kind() != reflect.Struct {
return fmt.Errorf("abi: invalid dst value for unpack, want struct, got %s", dstVal.Kind())
}
fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, dstVal)
if err != nil {
return err
}
for i, elem := range t.TupleElems {
fname := fieldmap[t.TupleRawNames[i]]
field := dstVal.FieldByName(fname)
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't found in the given value", t.TupleRawNames[i])
}
if err := unpack(elem, field.Addr().Interface(), srcVal.Field(i).Interface()); err != nil {
return err
}
}
return nil
case SliceTy:
if dstVal.Kind() != reflect.Slice {
return fmt.Errorf("abi: invalid dst value for unpack, want slice, got %s", dstVal.Kind())
}
slice := reflect.MakeSlice(dstVal.Type(), srcVal.Len(), srcVal.Len())
for i := 0; i < slice.Len(); i++ {
if err := unpack(t.Elem, slice.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
return err
}
}
dstVal.Set(slice)
case ArrayTy:
if dstVal.Kind() != reflect.Array {
return fmt.Errorf("abi: invalid dst value for unpack, want array, got %s", dstVal.Kind())
}
array := reflect.New(dstVal.Type()).Elem()
for i := 0; i < array.Len(); i++ {
if err := unpack(t.Elem, array.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
return err
}
}
dstVal.Set(array)
}
return nil
}
// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
if arguments.LengthNonIndexed() == 0 {
return nil
}
argument := arguments.NonIndexed()[0]
elem := reflect.ValueOf(v).Elem()
if elem.Kind() == reflect.Struct {
fieldmap, err := mapArgNamesToStructFields([]string{argument.Name}, elem)
if err != nil {
return err
}
field := elem.FieldByName(fieldmap[argument.Name])
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't be found in the given value", argument.Name)
}
return unpack(&argument.Type, field.Addr().Interface(), marshalledValues)
}
return unpack(&argument.Type, elem.Addr().Interface(), marshalledValues)
}
// unpackTuple unpacks ( hexdata -> go ) a batch of values.
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
var (
value = reflect.ValueOf(v).Elem()
typ = value.Type()
kind = value.Kind()
)
if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
return err
}
// If the interface is a struct, build the abi->struct_field mapping
var abi2struct map[string]string
if kind == reflect.Struct {
var err error
abi2struct, err = mapAbiToStructFields(arguments, value)
var (
argNames []string
err error
)
for _, arg := range arguments.NonIndexed() {
argNames = append(argNames, arg.Name)
}
abi2struct, err = mapArgNamesToStructFields(argNames, value)
if err != nil {
return err
}
}
for i, arg := range arguments.NonIndexed() {
reflectValue := reflect.ValueOf(marshalledValues[i])
switch kind {
case reflect.Struct:
if structField, ok := abi2struct[arg.Name]; ok {
if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
return err
}
field := value.FieldByName(abi2struct[arg.Name])
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name)
}
if err := unpack(&arg.Type, field.Addr().Interface(), marshalledValues[i]); err != nil {
return err
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
}
v := value.Index(i)
if err := requireAssignable(v, reflectValue); err != nil {
if err := requireAssignable(v, reflect.ValueOf(marshalledValues[i])); err != nil {
return err
}
if err := set(v.Elem(), reflectValue, arg); err != nil {
if err := unpack(&arg.Type, v.Addr().Interface(), marshalledValues[i]); err != nil {
return err
}
default:
@ -150,48 +234,7 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
}
}
return nil
}
// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
if len(marshalledValues) != 1 {
return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
}
elem := reflect.ValueOf(v).Elem()
kind := elem.Kind()
reflectValue := reflect.ValueOf(marshalledValues[0])
var abi2struct map[string]string
if kind == reflect.Struct {
var err error
if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
return err
}
arg := arguments.NonIndexed()[0]
if structField, ok := abi2struct[arg.Name]; ok {
return set(elem.FieldByName(structField), reflectValue, arg)
}
return nil
}
return set(elem, reflectValue, arguments.NonIndexed()[0])
}
// Computes the full size of an array;
// i.e. counting nested arrays, which count towards size for unpacking.
func getArraySize(arr *Type) int {
size := arr.Size
// Arrays can be nested, with each element being the same size
arr = arr.Elem
for arr.T == ArrayTy {
// Keep multiplying by elem.Size while the elem is an array.
size *= arr.Size
arr = arr.Elem
}
// Now we have the full array size, including its children.
return size
}
// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
@ -202,7 +245,7 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
virtualArgs := 0
for index, arg := range arguments.NonIndexed() {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
if arg.Type.T == ArrayTy {
if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
// If we have a static array, like [3]uint256, these are encoded
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
@ -213,7 +256,11 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
virtualArgs += getArraySize(&arg.Type) - 1
virtualArgs += getTypeSize(arg.Type)/32 - 1
} else if arg.Type.T == TupleTy && !isDynamicType(arg.Type) {
// If we have a static tuple, like (uint256, bool, uint256), these are
// encoded just like uint256,bool,uint256
virtualArgs += getTypeSize(arg.Type)/32 - 1
}
if err != nil {
return nil, err
@ -243,7 +290,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
inputOffset += getDynamicTypeOffset(abiArg.Type)
inputOffset += getTypeSize(abiArg.Type)
}
var ret []byte
for i, a := range args {
@ -272,14 +319,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
return ret, nil
}
// capitalise makes the first character of a string upper case, also removing any
// prefixing underscores from the variable names.
func capitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
// ToCamelCase converts an under-score string to a camel-case string
func ToCamelCase(input string) string {
parts := strings.Split(input, "_")
for i, s := range parts {
if len(s) > 0 {
parts[i] = strings.ToUpper(s[:1]) + s[1:]
}
}
if len(input) == 0 {
return ""
}
return strings.ToUpper(input[:1]) + input[1:]
return strings.Join(parts, "")
}
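
The implementation above splits on every underscore and capitalises each part, so the exported helper now handles interior underscores as well as leading ones. Expected behaviour, as implied by the code:

fmt.Println(abi.ToCamelCase("gas_limit"))    // GasLimit
fmt.Println(abi.ToCamelCase("_value"))       // Value
fmt.Println(abi.ToCamelCase("alreadyCamel")) // AlreadyCamel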


@ -36,10 +36,10 @@ type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Tra
// CallOpts is the collection of options to fine tune a contract call request.
type CallOpts struct {
Pending bool // Whether to operate on the pending state or the last known one
From common.Address // Optional the sender address, otherwise the first account is used
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
Pending bool // Whether to operate on the pending state or the last known one
From common.Address // Optional the sender address, otherwise the first account is used
BlockNumber *big.Int // Optional the block number on which the call should be performed
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// TransactOpts is the collection of authorization data required to create a
@ -148,10 +148,10 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
}
}
} else {
output, err = c.caller.CallContract(ctx, msg, nil)
output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
if err == nil && len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
if code, err = c.caller.CodeAt(ctx, c.address, nil); err != nil {
if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode
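
The added BlockNumber field lets read-only calls through generated bindings target a historical state instead of always the latest block. A minimal usage sketch, assuming a generated binding named token with a balanceOf view method; token, holder, and ctx are all illustrative, and math/big plus accounts/abi/bind are imported:

opts := &bind.CallOpts{
	BlockNumber: big.NewInt(7500000), // query state as of this block
	Context:     ctx,
}
balance, err := token.BalanceOf(opts, holder)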


@ -381,54 +381,23 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
// methodNormalizer is a name transformer that modifies Solidity method names to
// conform to target language naming conventions.
var methodNormalizer = map[Lang]func(string) string{
LangGo: capitalise,
LangGo: abi.ToCamelCase,
LangJava: decapitalise,
}
// capitalise makes a camel-case string which starts with an upper case character.
func capitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
}
if len(input) == 0 {
return ""
}
return toCamelCase(strings.ToUpper(input[:1]) + input[1:])
return abi.ToCamelCase(input)
}
// decapitalise makes a camel-case string which starts with a lower case character.
func decapitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
}
if len(input) == 0 {
return ""
return input
}
return toCamelCase(strings.ToLower(input[:1]) + input[1:])
}
// toCamelCase converts an under-score string to a camel-case string
func toCamelCase(input string) string {
toupper := false
result := ""
for k, v := range input {
switch {
case k == 0:
result = strings.ToUpper(string(input[0]))
case toupper:
result += strings.ToUpper(string(v))
toupper = false
case v == '_':
toupper = true
default:
result += string(v)
}
}
return result
goForm := abi.ToCamelCase(input)
return strings.ToLower(goForm[:1]) + goForm[1:]
}
// structured checks whether a list of ABI data types has enough information to


@ -36,12 +36,12 @@ type Event struct {
func (e Event) String() string {
inputs := make([]string, len(e.Inputs))
for i, input := range e.Inputs {
inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
if input.Indexed {
inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name)
}
}
return fmt.Sprintf("e %v(%v)", e.Name, strings.Join(inputs, ", "))
return fmt.Sprintf("event %v(%v)", e.Name, strings.Join(inputs, ", "))
}
// Id returns the canonical representation of the event's signature used by the
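
With the corrected formatter, inputs print in Solidity's "type name" order under the proper "event" keyword. For illustration, constructing the Event literal by hand (not how bindings normally obtain it; note NewType's second argument is the tuple components slice, nil for elementary types):

addrT, _ := abi.NewType("address", nil)
uintT, _ := abi.NewType("uint256", nil)
transfer := abi.Event{
	Name: "Transfer",
	Inputs: abi.Arguments{
		{Name: "from", Type: addrT, Indexed: true},
		{Name: "value", Type: uintT},
	},
}
fmt.Println(transfer) // event Transfer(address indexed from, uint256 value)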


@ -56,14 +56,14 @@ func (method Method) Sig() string {
func (method Method) String() string {
inputs := make([]string, len(method.Inputs))
for i, input := range method.Inputs {
inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
}
outputs := make([]string, len(method.Outputs))
for i, output := range method.Outputs {
outputs[i] = output.Type.String()
if len(output.Name) > 0 {
outputs[i] = fmt.Sprintf("%v ", output.Name)
outputs[i] += fmt.Sprintf(" %v", output.Name)
}
outputs[i] += output.Type.String()
}
constant := ""
if method.Const {


@ -71,22 +71,36 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
//
// set is a bit more lenient when it comes to assignment and doesn't force as
// strict a ruleset as bare `reflect` does.
func set(dst, src reflect.Value, output Argument) error {
dstType := dst.Type()
srcType := src.Type()
func set(dst, src reflect.Value) error {
dstType, srcType := dst.Type(), src.Type()
switch {
case dstType.AssignableTo(srcType):
dst.Set(src)
case dstType.Kind() == reflect.Interface:
return set(dst.Elem(), src)
case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT:
return set(dst.Elem(), src)
case srcType.AssignableTo(dstType) && dst.CanSet():
dst.Set(src)
case dstType.Kind() == reflect.Ptr:
return set(dst.Elem(), src, output)
case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice:
return setSlice(dst, src)
default:
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
}
return nil
}
// setSlice attempts to assign src to dst when slices are not assignable by default
// e.g. src: [][]byte -> dst: [][15]byte
func setSlice(dst, src reflect.Value) error {
slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
for i := 0; i < src.Len(); i++ {
v := src.Index(i)
reflect.Copy(slice.Index(i), v)
}
dst.Set(slice)
return nil
}
// requireAssignable assures that `dest` is a pointer and it's not an interface.
func requireAssignable(dst, src reflect.Value) error {
if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface {
@ -112,14 +126,14 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
return nil
}
// mapAbiToStringField maps abi to struct fields.
// mapArgNamesToStructFields maps a slice of argument names to struct fields.
// first round: for each Exportable field that contains a `abi:""` tag
// and this field name exists in the arguments, pair them together.
// second round: for each argument field that has not been already linked,
// and this field name exists in the given argument name list, pair them together.
// second round: for each argument name that has not been already linked,
// find what variable is expected to be mapped into, if it exists and has not been
// used, pair them.
func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
// Note this function assumes the given value is a struct value.
func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
typ := value.Type()
abi2struct := make(map[string]string)
@ -133,45 +147,39 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
continue
}
// skip fields that have no abi:"" tag.
var ok bool
var tagName string
if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
continue
}
// check if tag is empty.
if tagName == "" {
return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
}
// check which argument field matches with the abi tag.
found := false
for _, abiField := range args.NonIndexed() {
if abiField.Name == tagName {
if abi2struct[abiField.Name] != "" {
for _, arg := range argNames {
if arg == tagName {
if abi2struct[arg] != "" {
return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
}
// pair them
abi2struct[abiField.Name] = structFieldName
struct2abi[structFieldName] = abiField.Name
abi2struct[arg] = structFieldName
struct2abi[structFieldName] = arg
found = true
}
}
// check if this tag has been mapped.
if !found {
return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
}
}
// second round ~~~
for _, arg := range args {
for _, argName := range argNames {
abiFieldName := arg.Name
structFieldName := capitalise(abiFieldName)
structFieldName := ToCamelCase(argName)
if structFieldName == "" {
return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
@ -181,11 +189,11 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
// struct field with the same field name. If so, raise an error:
// abi: [ { "name": "value" } ]
// struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
if abi2struct[abiFieldName] != "" {
if abi2struct[abiFieldName] != structFieldName &&
if abi2struct[argName] != "" {
if abi2struct[argName] != structFieldName &&
struct2abi[structFieldName] == "" &&
value.FieldByName(structFieldName).IsValid() {
return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", argName)
}
continue
}
@ -197,16 +205,14 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
if value.FieldByName(structFieldName).IsValid() {
// pair them
abi2struct[abiFieldName] = structFieldName
struct2abi[structFieldName] = abiFieldName
abi2struct[argName] = structFieldName
struct2abi[structFieldName] = argName
} else {
// not paired, but annotate as used, to detect cases like
// abi : [ { "name": "value" }, { "name": "_value" } ]
// struct { Value *big.Int }
struct2abi[structFieldName] = abiFieldName
struct2abi[structFieldName] = argName
}
}
return abi2struct, nil
}


@ -17,6 +17,7 @@
package abi
import (
"errors"
"fmt"
"reflect"
"regexp"
@ -32,6 +33,7 @@ const (
StringTy
SliceTy
ArrayTy
TupleTy
AddressTy
FixedBytesTy
BytesTy
@ -43,13 +45,16 @@ const (
// Type is the reflection of the supported argument type
type Type struct {
Elem *Type
Kind reflect.Kind
Type reflect.Type
Size int
T byte // Our own type checking
stringKind string // holds the unparsed string for deriving signatures
// Tuple relative fields
TupleElems []*Type // Type information of all tuple fields
TupleRawNames []string // Raw field name of all tuple fields
}
var (
@ -58,7 +63,7 @@ var (
)
// NewType creates a new reflection type of abi type given in t.
func NewType(t string) (typ Type, err error) {
func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
return Type{}, fmt.Errorf("invalid arg type in abi")
@ -71,7 +76,7 @@ func NewType(t string) (typ Type, err error) {
if strings.Count(t, "[") != 0 {
i := strings.LastIndex(t, "[")
// recursively embed the type
embeddedType, err := NewType(t[:i])
embeddedType, err := NewType(t[:i], components)
if err != nil {
return Type{}, err
}
@ -87,6 +92,9 @@ func NewType(t string) (typ Type, err error) {
typ.Kind = reflect.Slice
typ.Elem = &embeddedType
typ.Type = reflect.SliceOf(embeddedType.Type)
if embeddedType.T == TupleTy {
typ.stringKind = embeddedType.stringKind + sliced
}
} else if len(intz) == 1 {
// is an array
typ.T = ArrayTy
@ -97,6 +105,9 @@ func NewType(t string) (typ Type, err error) {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
if embeddedType.T == TupleTy {
typ.stringKind = embeddedType.stringKind + sliced
}
} else {
return Type{}, fmt.Errorf("invalid formatting of array type")
}
@ -158,6 +169,40 @@ func NewType(t string) (typ Type, err error) {
typ.Size = varSize
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
}
case "tuple":
var (
fields []reflect.StructField
elems []*Type
names []string
expression string // canonical parameter expression
)
expression += "("
for idx, c := range components {
cType, err := NewType(c.Type, c.Components)
if err != nil {
return Type{}, err
}
if ToCamelCase(c.Name) == "" {
return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
}
fields = append(fields, reflect.StructField{
Name: ToCamelCase(c.Name), // reflect.StructOf will panic for unexported field names.
Type: cType.Type,
})
elems = append(elems, &cType)
names = append(names, c.Name)
expression += cType.stringKind
if idx != len(components)-1 {
expression += ","
}
}
expression += ")"
typ.Kind = reflect.Struct
typ.Type = reflect.StructOf(fields)
typ.TupleElems = elems
typ.TupleRawNames = names
typ.T = TupleTy
typ.stringKind = expression
case "function":
typ.Kind = reflect.Array
typ.T = FunctionTy
@ -178,7 +223,6 @@ func (t Type) String() (out string) {
func (t Type) pack(v reflect.Value) ([]byte, error) {
// dereference pointer first if it's a pointer
v = indirect(v)
if err := typeCheck(t, v); err != nil {
return nil, err
}
@ -196,7 +240,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
offset := 0
offsetReq := isDynamicType(*t.Elem)
if offsetReq {
offset = getDynamicTypeOffset(*t.Elem) * v.Len()
offset = getTypeSize(*t.Elem) * v.Len()
}
var tail []byte
for i := 0; i < v.Len(); i++ {
@ -213,6 +257,45 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
tail = append(tail, val...)
}
return append(ret, tail...), nil
case TupleTy:
// (T1,...,Tk) for k >= 0 and any types T1, …, Tk
// enc(X) = head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(k))
// where X = (X(1), ..., X(k)) and head and tail are defined for Ti being a static
// type as
// head(X(i)) = enc(X(i)) and tail(X(i)) = "" (the empty string)
// and as
// head(X(i)) = enc(len(head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(i-1))))
// tail(X(i)) = enc(X(i))
// otherwise, i.e. if Ti is a dynamic type.
fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, v)
if err != nil {
return nil, err
}
// Calculate prefix occupied size.
offset := 0
for _, elem := range t.TupleElems {
offset += getTypeSize(*elem)
}
var ret, tail []byte
for i, elem := range t.TupleElems {
field := v.FieldByName(fieldmap[t.TupleRawNames[i]])
if !field.IsValid() {
return nil, fmt.Errorf("field %s for tuple not found in the given struct", t.TupleRawNames[i])
}
val, err := elem.pack(field)
if err != nil {
return nil, err
}
if isDynamicType(*elem) {
ret = append(ret, packNum(reflect.ValueOf(offset))...)
tail = append(tail, val...)
offset += len(val)
} else {
ret = append(ret, val...)
}
}
return append(ret, tail...), nil
default:
return packElement(t, v), nil
}
@ -225,25 +308,45 @@ func (t Type) requiresLengthPrefix() bool {
}
// isDynamicType returns true if the type is dynamic.
// StringTy, BytesTy, and SliceTy(irrespective of slice element type) are dynamic types
// ArrayTy is considered dynamic if and only if the Array element is a dynamic type.
// This function recursively checks the type for slice and array elements.
// The following types are called “dynamic”:
// * bytes
// * string
// * T[] for any T
// * T[k] for any dynamic T and any k >= 0
// * (T1,...,Tk) if Ti is dynamic for some 1 <= i <= k
func isDynamicType(t Type) bool {
// dynamic types
// array is also a dynamic type if the array type is dynamic
if t.T == TupleTy {
for _, elem := range t.TupleElems {
if isDynamicType(*elem) {
return true
}
}
return false
}
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
}
// getDynamicTypeOffset returns the offset for the type.
// See `isDynamicType` to know which types are considered dynamic.
// If the type t is an array and element type is not a dynamic type, then we consider it a static type and
// return 32 * size of array since length prefix is not required.
// If t is a dynamic type or element type(for slices and arrays) is dynamic, then we simply return 32 as offset.
func getDynamicTypeOffset(t Type) int {
// if it is an array and there are no dynamic types
// then the array is static type
// getTypeSize returns the size that this type needs to occupy.
// We distinguish static and dynamic types. Static types are encoded in-place
// and dynamic types are encoded at a separately allocated location after the
// current block.
// So for a static variable, the size returned represents the size that the
// variable actually occupies.
// For a dynamic variable, the returned size is fixed 32 bytes, which is used
// to store the location reference for actual value storage.
func getTypeSize(t Type) int {
if t.T == ArrayTy && !isDynamicType(*t.Elem) {
return 32 * t.Size
// Recursively calculate type size if it is a nested array
if t.Elem.T == ArrayTy {
return t.Size * getTypeSize(*t.Elem)
}
return t.Size * 32
} else if t.T == TupleTy && !isDynamicType(t) {
total := 0
for _, elem := range t.TupleElems {
total += getTypeSize(*elem)
}
return total
}
return 32
}
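
With TupleTy in place, ABI JSON that uses "components" can be parsed directly, and the resulting Type carries a reflect-generated struct whose field names come from ToCamelCase. A hedged sketch of building such a type by hand:

tuple, err := abi.NewType("tuple", []abi.ArgumentMarshaling{
	{Name: "owner", Type: "address"},
	{Name: "amount", Type: "uint256"},
})
if err != nil {
	panic(err)
}
fmt.Println(tuple.String()) // (address,uint256)
// tuple.Type is roughly struct { Owner common.Address; Amount *big.Int }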


@ -115,17 +115,6 @@ func readFixedBytes(t Type, word []byte) (interface{}, error) {
}
func getFullElemSize(elem *Type) int {
//all other should be counted as 32 (slices have pointers to respective elements)
size := 32
//arrays wrap it, each element being the same size
for elem.T == ArrayTy {
size *= elem.Size
elem = elem.Elem
}
return size
}
// iteratively unpack elements
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
if size < 0 {
@ -150,13 +139,9 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
// Arrays have packed elements, resulting in longer unpack steps.
// Slices have just 32 bytes per element (pointing to the contents).
elemSize := 32
if t.T == ArrayTy {
elemSize = getFullElemSize(t.Elem)
}
elemSize := getTypeSize(*t.Elem)
for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {
inter, err := toGoType(i, *t.Elem, output)
if err != nil {
return nil, err
@ -170,6 +155,36 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
return refSlice.Interface(), nil
}
func forTupleUnpack(t Type, output []byte) (interface{}, error) {
retval := reflect.New(t.Type).Elem()
virtualArgs := 0
for index, elem := range t.TupleElems {
marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
if elem.T == ArrayTy && !isDynamicType(*elem) {
// If we have a static array, like [3]uint256, these are encoded
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
// we count the index from now on.
//
// Array values nested multiple levels deep are also encoded inline:
// [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
virtualArgs += getTypeSize(*elem)/32 - 1
} else if elem.T == TupleTy && !isDynamicType(*elem) {
// If we have a static tuple, like (uint256, bool, uint256), these are
// encoded just like uint256,bool,uint256
virtualArgs += getTypeSize(*elem)/32 - 1
}
if err != nil {
return nil, err
}
retval.Field(index).Set(reflect.ValueOf(marshalledValue))
}
return retval.Interface(), nil
}
// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a go type with accordance with the ABI spec.
func toGoType(index int, t Type, output []byte) (interface{}, error) {
@ -178,14 +193,14 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
}
var (
returnOutput []byte
begin, end int
err error
returnOutput []byte
begin, length int
err error
)
// if we require a length prefix, find the beginning word and size returned.
if t.requiresLengthPrefix() {
begin, end, err = lengthPrefixPointsTo(index, output)
begin, length, err = lengthPrefixPointsTo(index, output)
if err != nil {
return nil, err
}
@ -194,12 +209,26 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
}
switch t.T {
case TupleTy:
if isDynamicType(t) {
begin, err := tuplePointsTo(index, output)
if err != nil {
return nil, err
}
return forTupleUnpack(t, output[begin:])
} else {
return forTupleUnpack(t, output[index:])
}
case SliceTy:
return forEachUnpack(t, output, begin, end)
return forEachUnpack(t, output[begin:], 0, length)
case ArrayTy:
return forEachUnpack(t, output, index, t.Size)
if isDynamicType(*t.Elem) {
offset := int64(binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:]))
return forEachUnpack(t, output[offset:], 0, t.Size)
}
return forEachUnpack(t, output[index:], 0, t.Size)
case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+end]), nil
return string(output[begin : begin+length]), nil
case IntTy, UintTy:
return readInteger(t.T, t.Kind, returnOutput), nil
case BoolTy:
@ -209,7 +238,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case HashTy:
return common.BytesToHash(returnOutput), nil
case BytesTy:
return output[begin : begin+end], nil
return output[begin : begin+length], nil
case FixedBytesTy:
return readFixedBytes(t, returnOutput)
case FunctionTy:
@ -250,3 +279,17 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
length = int(lengthBig.Uint64())
return
}
// tuplePointsTo resolves the location reference for a dynamic tuple.
func tuplePointsTo(index int, output []byte) (start int, err error) {
offset := big.NewInt(0).SetBytes(output[index : index+32])
outputLen := big.NewInt(int64(len(output)))
if offset.Cmp(outputLen) > 0 {
return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
}
if offset.BitLen() > 63 {
return 0, fmt.Errorf("abi offset larger than int64: %v", offset)
}
return int(offset.Uint64()), nil
}
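
Putting the tuple decoding together: a static tuple such as (uint256,bool) is laid out inline as two 32-byte words, so a 64-byte return buffer decodes straight into a generated struct. A minimal sketch under illustrative assumptions (the ABI JSON is an example, not from this commit; the wrapper field Out must match the camel-cased output name "out"):

const def = `[{"type":"function","name":"get","outputs":[
	{"name":"out","type":"tuple","components":[
		{"name":"a","type":"uint256"},
		{"name":"b","type":"bool"}]}]}]`
parsed, _ := abi.JSON(strings.NewReader(def))

ret := make([]byte, 64)
ret[31] = 7 // word 0: field a = 7
ret[63] = 1 // word 1: field b = true

var res struct {
	Out struct {
		A *big.Int
		B bool
	}
}
if err := parsed.Unpack(&res, "get", ret); err != nil {
	panic(err)
}
fmt.Println(res.Out.A, res.Out.B) // 7 true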


@ -52,8 +52,8 @@ func (w *keystoreWallet) Status() (string, error) {
// is no connection or decryption step necessary to access the list of accounts.
func (w *keystoreWallet) Open(passphrase string) error { return nil }
// Close implements accounts.Wallet, but is a noop for plain wallets since is no
// meaningful open operation.
// Close implements accounts.Wallet, but is a noop for plain wallets since there
// is no meaningful open operation.
func (w *keystoreWallet) Close() error { return nil }
// Accounts implements accounts.Wallet, returning an account list consisting of
@ -84,10 +84,7 @@ func (w *keystoreWallet) SelfDerive(base accounts.DerivationPath, chain ethereum
// able to sign via our shared keystore backend).
func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
// Make sure the requested account is contained within
if account.Address != w.account.Address {
return nil, accounts.ErrUnknownAccount
}
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@ -100,10 +97,7 @@ func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte
// be able to sign via our shared keystore backend).
func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
if account.Address != w.account.Address {
return nil, accounts.ErrUnknownAccount
}
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@ -114,10 +108,7 @@ func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction,
// given hash with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
// Make sure the requested account is contained within
if account.Address != w.account.Address {
return nil, accounts.ErrUnknownAccount
}
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@ -128,10 +119,7 @@ func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passph
// transaction with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
if account.Address != w.account.Address {
return nil, accounts.ErrUnknownAccount
}
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
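
The four signing methods now delegate their inlined ownership checks to the wallet's Contains method. The real implementation is not shown in this diff; reconstructed from the two checks the removed code performed, it behaves like:

// Reconstruction for illustration, not the literal keystore source:
// the address must match, and the URL must be unset or match too.
func walletContains(w *keystoreWallet, account accounts.Account) bool {
	return account.Address == w.account.Address &&
		(account.URL == (accounts.URL{}) || account.URL == w.account.URL)
}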


@ -257,7 +257,9 @@ func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, er
// Decode the hex string into an Ethereum address and return
var address common.Address
hex.Decode(address[:], hexstr)
if _, err = hex.Decode(address[:], hexstr); err != nil {
return common.Address{}, err
}
return address, nil
}


@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.2.windows-%GETH_ARCH%.zip
- 7z x go1.11.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.5.windows-%GETH_ARCH%.zip
- 7z x go1.11.5.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version


@ -26,11 +26,11 @@ import "syscall"
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func Raise(max uint64) error {
func Raise(max uint64) (uint64, error) {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
return 0, err
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
@ -38,9 +38,12 @@ func Raise(max uint64) error {
limit.Cur = int64(max)
}
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
return 0, err
}
return nil
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
return limit.Cur, nil
}
// Current retrieves the number of file descriptors allowed to be opened by this


@ -22,11 +22,12 @@ import "syscall"
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func Raise(max uint64) error {
// Returns the size it was set to (may differ from the desired 'max')
func Raise(max uint64) (uint64, error) {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
return 0, err
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
@ -34,9 +35,13 @@ func Raise(max uint64) error {
limit.Cur = max
}
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
return 0, err
}
return nil
// MacOS can silently apply further caps, so retrieve the actually set limit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
return limit.Cur, nil
}
// Current retrieves the number of file descriptors allowed to be opened by this
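
Returning the post-Setrlimit value matters because the OS (macOS in particular) may grant less than requested; callers can now log or act on the allowance actually in effect. A hedged usage sketch, assuming the standard log package and go-ethereum's common/fdlimit import:

limit, err := fdlimit.Raise(8192)
if err != nil {
	log.Fatalf("failed to raise file descriptor allowance: %v", err)
}
if limit < 8192 {
	log.Printf("fd limit capped at %d by the OS", limit)
}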


@ -16,28 +16,30 @@
package fdlimit
import "errors"
import "fmt"
const hardlimit = 16384
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func Raise(max uint64) error {
func Raise(max uint64) (uint64, error) {
// This method is NOP by design:
// * Linux/Darwin counterparts need to manually increase per process limits
// * On Windows Go uses the CreateFile API, which is limited to 16K files, non
// changeable from within a running process
// This way we can always "request" raising the limits, which will either have
// or not have effect based on the platform we're running on.
if max > 16384 {
return errors.New("file descriptor limit (16384) reached")
if max > hardlimit {
return hardlimit, fmt.Errorf("file descriptor limit (%d) reached", hardlimit)
}
return nil
return max, nil
}
// Current retrieves the number of file descriptors allowed to be opened by this
// process.
func Current() (int, error) {
// Please see Raise for the reason why we use hard coded 16K as the limit
return 16384, nil
return hardlimit, nil
}
// Maximum retrieves the maximum number of file descriptors this process is


@ -27,7 +27,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/sha3"
"golang.org/x/crypto/sha3"
)
// Lengths of hashes and addresses in bytes.
@ -196,7 +196,7 @@ func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Hex returns an EIP55-compliant hex string representation of the address.
func (a Address) Hex() string {
unchecksummed := hex.EncodeToString(a[:])
sha := sha3.NewKeccak256()
sha := sha3.NewLegacyKeccak256()
sha.Write([]byte(unchecksummed))
hash := sha.Sum(nil)

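The vendored crypto/sha3 fork is dropped in favour of golang.org/x/crypto/sha3, which exposes the original Keccak padding through its Legacy constructors. A minimal migration sketch:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// NewLegacyKeccak256 keeps the pre-NIST Keccak padding that Ethereum
	// uses; it replaces the vendored sha3.NewKeccak256 one for one.
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte("hello"))
	fmt.Printf("keccak256: %x\n", h.Sum(nil))
}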
View File

@ -33,13 +33,13 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
lru "github.com/hashicorp/golang-lru"
"golang.org/x/crypto/sha3"
)
const (
@ -148,7 +148,7 @@ type SignerFn func(accounts.Account, []byte) ([]byte, error)
// panics. This is done to avoid accidentally using both forms (signature present
// or not), which could be abused to produce different hashes for the same header.
func sigHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, []interface{}{
header.ParentHash,

View File

@ -30,8 +30,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"golang.org/x/crypto/sha3"
)
const (
@ -123,7 +123,7 @@ func seedHash(block uint64) []byte {
if block < epochLength {
return seed
}
keccak256 := makeHasher(sha3.NewKeccak256())
keccak256 := makeHasher(sha3.NewLegacyKeccak256())
for i := 0; i < int(block/epochLength); i++ {
keccak256(seed, seed)
}
@ -177,7 +177,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
}
}()
// Create a hasher to reuse between invocations
keccak512 := makeHasher(sha3.NewKeccak512())
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
// Sequentially produce the initial dataset
keccak512(cache, seed)
@ -301,7 +301,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
defer pend.Done()
// Create a hasher to reuse between invocations
keccak512 := makeHasher(sha3.NewKeccak512())
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
// Calculate the data segment this thread should generate
batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
@ -375,7 +375,7 @@ func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32)
// in-memory cache) in order to produce our final value for a particular header
// hash and nonce.
func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
keccak512 := makeHasher(sha3.NewKeccak512())
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
lookup := func(index uint32) []uint32 {
rawData := generateDatasetItem(cache, index, keccak512)

View File

@ -31,9 +31,9 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// Ethash proof-of-work protocol constants.
@ -575,7 +575,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header
// SealHash returns the hash of a block prior to it being sealed.
func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, []interface{}{
header.ParentHash,

View File

@ -65,7 +65,7 @@ const (
triesInMemory = 128
// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
BlockChainVersion = 3
BlockChainVersion uint64 = 3
)
// CacheConfig contains the configuration values for the trie caching/pruning
@ -972,20 +972,26 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
triedb.Cap(limit - ethdb.IdealBatchSize)
}
// Find the next state trie we need to commit
header := bc.GetHeaderByNumber(current - triesInMemory)
chosen := header.Number.Uint64()
chosen := current - triesInMemory
// If we exceeded our time allowance, flush an entire trie to disk
if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
// If we're exceeding limits but haven't reached a large enough memory gap,
// warn the user that the system is becoming unstable.
if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
// If the header is missing (canonical chain behind), we're reorging a low
// diff sidechain. Suspend committing until this operation is completed.
header := bc.GetHeaderByNumber(chosen)
if header == nil {
log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
} else {
// If we're exceeding limits but haven't reached a large enough memory gap,
// warn the user that the system is becoming unstable.
if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
}
// Flush an entire trie and restart the counters
triedb.Commit(header.Root, true)
lastWrite = chosen
bc.gcproc = 0
}
// Flush an entire trie and restart the counters
triedb.Commit(header.Root, true)
lastWrite = chosen
bc.gcproc = 0
}
// Garbage collect anything below our required write retention
for !bc.triegc.Empty() {
@ -1136,7 +1142,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
switch {
// First block is pruned, insert as sidechain and reorg only if TD grows enough
case err == consensus.ErrPrunedAncestor:
return bc.insertSidechain(it)
return bc.insertSidechain(block, it)
// First block is future, shove it (and all children) to the future queue (unknown ancestor)
case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
@ -1278,7 +1284,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) {
func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
var (
externTd *big.Int
current = bc.CurrentBlock().NumberU64()
@ -1287,7 +1293,7 @@ func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, [
// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
// ones. Any other error means that the block is invalid, and should not be written
// to disk.
block, err := it.current(), consensus.ErrPrunedAncestor
err := consensus.ErrPrunedAncestor
for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
// Check the canonical state root for that number
if number := block.NumberU64(); current >= number {
@ -1317,7 +1323,7 @@ func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, [
if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
return it.index, nil, nil, err
}
log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(),
log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())

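The guard above boils down to: derive the commit target arithmetically, and if the canonical header for that number is missing (a sidechain reorg is rewriting it), skip the commit for now. A self-contained toy sketch of that decision (simplified stand-in types, not the real BlockChain API):

package main

import "fmt"

const triesInMemory = 128

// header stands in for types.Header; the lookup models GetHeaderByNumber,
// which returns nil while a reorg is rewriting canonical numbers.
type header struct{ number uint64 }

// chooseCommit returns the block number whose trie should be flushed,
// or reports false to postpone the commit during a reorg.
func chooseCommit(getHeader func(uint64) *header, current, lastWrite uint64) (uint64, bool) {
	chosen := current - triesInMemory
	if getHeader(chosen) == nil {
		return lastWrite, false // canonical chain behind: postpone
	}
	return chosen, true
}

func main() {
	missing := func(uint64) *header { return nil }
	present := func(n uint64) *header { return &header{n} }

	if _, ok := chooseCommit(missing, 1000, 800); !ok {
		fmt.Println("reorg in progress, trie commit postponed")
	}
	if n, ok := chooseCommit(present, 1000, 800); ok {
		fmt.Println("committing trie for block", n)
	}
}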
View File

@ -111,14 +111,6 @@ func (it *insertIterator) next() (*types.Block, error) {
return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
}
// current returns the current block that's being processed.
func (it *insertIterator) current() *types.Block {
if it.index < 0 || it.index+1 >= len(it.chain) {
return nil
}
return it.chain[it.index]
}
// previous returns the previous block that was being processed, or nil
func (it *insertIterator) previous() *types.Block {
if it.index < 1 {

View File

@ -149,7 +149,7 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
// associated difficulty. It's useful to test scenarios where forking is not
// tied to chain length directly.
func (b *BlockGen) OffsetTime(seconds int64) {
b.header.Time.Add(b.header.Time, new(big.Int).SetInt64(seconds))
b.header.Time.Add(b.header.Time, big.NewInt(seconds))
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
panic("block time out of range")
}

View File

@ -157,7 +157,6 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constant
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
// Just commit the new block if there is no stored genesis block.
stored := rawdb.ReadCanonicalHash(db, 0)
if (stored == common.Hash{}) {
@ -183,6 +182,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constant
newcfg := genesis.configOrDefault(stored)
if constantinopleOverride != nil {
newcfg.ConstantinopleBlock = constantinopleOverride
newcfg.PetersburgBlock = constantinopleOverride
}
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
@ -339,6 +339,18 @@ func DefaultRinkebyGenesisBlock() *Genesis {
}
}
// DefaultGoerliGenesisBlock returns the Görli network genesis block.
func DefaultGoerliGenesisBlock() *Genesis {
return &Genesis{
Config: params.GoerliChainConfig,
Timestamp: 1548854791,
ExtraData: hexutil.MustDecode("0x22466c6578692069732061207468696e6722202d204166726900000000000000e0a2bd4258d2768837baa26a28fe71dc079f84c70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
GasLimit: 10485760,
Difficulty: big.NewInt(1),
Alloc: decodePrealloc(goerliAllocData),
}
}
// DeveloperGenesisBlock returns the 'geth --dev' genesis block. Note, this must
// be seeded with the
func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {

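For reference, the new Görli genesis can be committed to a fresh database like any other preset. A hedged sketch against the 1.8.x APIs as they exist in this vendored tree (ethdb.NewMemDatabase, Genesis.Commit):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db := ethdb.NewMemDatabase() // in-memory database, for illustration
	block, err := core.DefaultGoerliGenesisBlock().Commit(db)
	if err != nil {
		panic(err)
	}
	fmt.Println("Görli genesis hash:", block.Hash().Hex())
}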
File diff suppressed because one or more lines are too long

View File

@ -26,19 +26,27 @@ import (
)
// ReadDatabaseVersion retrieves the version number of the database.
func ReadDatabaseVersion(db DatabaseReader) int {
var version int
func ReadDatabaseVersion(db DatabaseReader) *uint64 {
var version uint64
enc, _ := db.Get(databaseVerisionKey)
rlp.DecodeBytes(enc, &version)
if len(enc) == 0 {
return nil
}
if err := rlp.DecodeBytes(enc, &version); err != nil {
return nil
}
return version
return &version
}
// WriteDatabaseVersion stores the version number of the database
func WriteDatabaseVersion(db DatabaseWriter, version int) {
enc, _ := rlp.EncodeToBytes(version)
if err := db.Put(databaseVerisionKey, enc); err != nil {
func WriteDatabaseVersion(db DatabaseWriter, version uint64) {
enc, err := rlp.EncodeToBytes(version)
if err != nil {
log.Crit("Failed to encode database version", "err", err)
}
if err = db.Put(databaseVerisionKey, enc); err != nil {
log.Crit("Failed to store the database version", "err", err)
}
}

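The switch from int to *uint64 lets callers distinguish "never written" (nil) from any stored version, including zero. A minimal sketch of the new read/write round trip:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db := ethdb.NewMemDatabase()

	// nil means the key is absent (or undecodable), not version 0.
	if v := rawdb.ReadDatabaseVersion(db); v == nil {
		rawdb.WriteDatabaseVersion(db, core.BlockChainVersion)
	}
	fmt.Println("database version:", *rawdb.ReadDatabaseVersion(db))
}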
View File

@ -468,9 +468,9 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
//
// Carrying over the balance ensures that Ether doesn't disappear.
func (self *StateDB) CreateAccount(addr common.Address) {
new, prev := self.createObject(addr)
newObj, prev := self.createObject(addr)
if prev != nil {
new.setBalance(prev.data.Balance)
newObj.setBalance(prev.data.Balance)
}
}

View File

@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
// senderCacher is a concurrent transaction sender recoverer anc cacher.
// senderCacher is a concurrent transaction sender recoverer and cacher.
var senderCacher = newTxSenderCacher(runtime.NumCPU())
// txSenderCacherRequest is a request for recovering transaction senders with a

View File

@ -172,6 +172,26 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig {
log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
conf.PriceBump = DefaultTxPoolConfig.PriceBump
}
if conf.AccountSlots < 1 {
log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
}
if conf.GlobalSlots < 1 {
log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
}
if conf.AccountQueue < 1 {
log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
}
if conf.GlobalQueue < 1 {
log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
}
if conf.Lifetime < 1 {
log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
conf.Lifetime = DefaultTxPoolConfig.Lifetime
}
return conf
}

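sanitize itself is unexported (core.NewTxPool applies it internally), so as a rough stand-in the guard pattern looks like this (simplified config struct; the defaults mirror DefaultTxPoolConfig):

package main

import (
	"fmt"
	"time"
)

// txPoolConfig is a stripped-down stand-in for core.TxPoolConfig,
// just to illustrate the new floor-at-default guards.
type txPoolConfig struct {
	GlobalSlots uint64
	Lifetime    time.Duration
}

var defaults = txPoolConfig{GlobalSlots: 4096, Lifetime: 3 * time.Hour}

func sanitize(c txPoolConfig) txPoolConfig {
	if c.GlobalSlots < 1 {
		c.GlobalSlots = defaults.GlobalSlots // zero slots would stall the pool
	}
	if c.Lifetime < 1 {
		c.Lifetime = defaults.Lifetime // zero lifetime would evict instantly
	}
	return c
}

func main() {
	fmt.Println(sanitize(txPoolConfig{})) // a zero config comes back with defaults
}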
View File

@ -28,8 +28,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
var (
@ -109,7 +109,7 @@ func (h *Header) Size() common.StorageSize {
}
func rlpHash(x interface{}) (h common.Hash) {
hw := sha3.NewKeccak256()
hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h

View File

@ -234,7 +234,7 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) {
}
// WithSignature returns a new transaction with the given signature.
// This signature needs to be formatted as described in the yellow paper (v+27).
// This signature needs to be in the [R || S || V] format where V is 0 or 1.
func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) {
r, s, v, err := signer.SignatureValues(tx, sig)
if err != nil {

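The reworded comment matches what crypto.Sign already produces: a 65-byte [R || S || V] signature with V in {0, 1}. A sketch of the flow, which is essentially what types.SignTx does internally:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	signer := types.NewEIP155Signer(big.NewInt(1))
	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 21000, big.NewInt(1), nil)

	// crypto.Sign yields [R || S || V] with V being 0 or 1, exactly the
	// format WithSignature now documents.
	sig, err := crypto.Sign(signer.Hash(tx).Bytes(), key)
	if err != nil {
		panic(err)
	}
	signed, err := tx.WithSignature(signer, sig)
	if err != nil {
		panic(err)
	}
	fmt.Println("signed tx hash:", signed.Hash().Hex())
}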
View File

@ -121,7 +121,9 @@ func gasSStore(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, m
current = evm.StateDB.GetState(contract.Address(), common.BigToHash(x))
)
// The legacy gas metering only takes into consideration the current state
if !evm.chainRules.IsConstantinople {
// Legacy rules should be applied if we are in Petersburg (removal of EIP-1283)
// OR Constantinople is not active
if evm.chainRules.IsPetersburg || !evm.chainRules.IsConstantinople {
// This checks for 3 scenario's and calculates gas accordingly:
//
// 1. From a zero-value address to a non-zero value (NEW VALUE)

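On mainnet Petersburg activates at the same block as Constantinople, so EIP-1283's net metering never goes live there; the condition can be checked through the fork-rule helper. A small sketch:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Block 7280000 is both the Constantinople and Petersburg activation
	// on mainnet, so legacy SSTORE metering stays in force.
	rules := params.MainnetChainConfig.Rules(big.NewInt(7280000))
	useLegacy := rules.IsPetersburg || !rules.IsConstantinople
	fmt.Println("legacy SSTORE gas metering:", useLegacy) // true
}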
View File

@ -24,8 +24,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params"
"golang.org/x/crypto/sha3"
)
var (
@ -387,7 +387,7 @@ func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory
data := memory.Get(offset.Int64(), size.Int64())
if interpreter.hasher == nil {
interpreter.hasher = sha3.NewKeccak256().(keccakState)
interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState)
} else {
interpreter.hasher.Reset()
}

View File

@ -34,7 +34,11 @@ type JSONLogger struct {
// NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
// into the provided stream.
func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger {
return &JSONLogger{json.NewEncoder(writer), cfg}
l := &JSONLogger{json.NewEncoder(writer), cfg}
if l.cfg == nil {
l.cfg = &LogConfig{}
}
return l
}
func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {

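With the nil check in place, callers no longer need to construct an empty LogConfig themselves. A one-line usage sketch:

package main

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	// A nil LogConfig is now substituted with an empty one internally,
	// so this no longer risks a nil-pointer dereference mid-trace.
	logger := vm.NewJSONLogger(nil, os.Stdout)
	_ = logger
}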
View File

@ -30,8 +30,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
var (
@ -43,7 +43,7 @@ var errInvalidPubkey = errors.New("invalid secp256k1 public key")
// Keccak256 calculates and returns the Keccak256 hash of the input data.
func Keccak256(data ...[]byte) []byte {
d := sha3.NewKeccak256()
d := sha3.NewLegacyKeccak256()
for _, b := range data {
d.Write(b)
}
@ -53,7 +53,7 @@ func Keccak256(data ...[]byte) []byte {
// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
// converting it to an internal Hash data structure.
func Keccak256Hash(data ...[]byte) (h common.Hash) {
d := sha3.NewKeccak256()
d := sha3.NewLegacyKeccak256()
for _, b := range data {
d.Write(b)
}
@ -63,7 +63,7 @@ func Keccak256Hash(data ...[]byte) (h common.Hash) {
// Keccak512 calculates and returns the Keccak512 hash of the input data.
func Keccak512(data ...[]byte) []byte {
d := sha3.NewKeccak512()
d := sha3.NewLegacyKeccak512()
for _, b := range data {
d.Write(b)
}

View File

@ -310,7 +310,7 @@ var theCurve = new(BitCurve)
func init() {
// See SEC 2 section 2.7.1
// curve parameters taken from:
// http://www.secg.org/collateral/sec2_final.pdf
// http://www.secg.org/sec2-v2.pdf
theCurve.P, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 0)
theCurve.N, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 0)
theCurve.B, _ = new(big.Int).SetString("0x0000000000000000000000000000000000000000000000000000000000000007", 0)

View File

@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@ -1,66 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
// the SHAKE variable-output-length hash functions defined by FIPS-202.
//
// Both types of hash function use the "sponge" construction and the Keccak
// permutation. For a detailed specification see http://keccak.noekeon.org/
//
//
// Guidance
//
// If you aren't sure what function you need, use SHAKE256 with at least 64
// bytes of output. The SHAKE instances are faster than the SHA3 instances;
// the latter have to allocate memory to conform to the hash.Hash interface.
//
// If you need a secret-key MAC (message authentication code), prepend the
// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
// output.
//
//
// Security strengths
//
// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
// strength against preimage attacks of x bits. Since they only produce "x"
// bits of output, their collision-resistance is only "x/2" bits.
//
// The SHAKE-256 and -128 functions have a generic security strength of 256 and
// 128 bits against all attacks, provided that at least 2x bits of their output
// is used. Requesting more than 64 or 32 bytes of output, respectively, does
// not increase the collision-resistance of the SHAKE functions.
//
//
// The sponge construction
//
// A sponge builds a pseudo-random function from a public pseudo-random
// permutation, by applying the permutation to a state of "rate + capacity"
// bytes, but hiding "capacity" of the bytes.
//
// A sponge starts out with a zero state. To hash an input using a sponge, up
// to "rate" bytes of the input are XORed into the sponge's state. The sponge
// is then "full" and the permutation is applied to "empty" it. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge in the same way, except that output
// is copied out instead of input being XORed in.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
//
//
// Recommendations
//
// The SHAKE functions are recommended for most new uses. They can produce
// output of arbitrary length. SHAKE256, with an output length of at least
// 64 bytes, provides 256-bit security against all attacks. The Keccak team
// recommends it for most applications upgrading from SHA2-512. (NIST chose a
// much stronger, but much slower, sponge instance for SHA3-512.)
//
// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
// They produce output of the same length, with the same security strengths
// against all attacks. This means, in particular, that SHA3-256 only has
// 128-bit collision resistance, because its output length is 32 bytes.
package sha3

View File

@ -1,71 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file provides functions for creating instances of the SHA-3
// and SHAKE hash functions, as well as utility functions for hashing
// bytes.
import (
"hash"
)
// NewKeccak256 creates a new Keccak-256 hash.
func NewKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
// NewKeccak512 creates a new Keccak-512 hash.
func NewKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
// New224 creates a new SHA3-224 hash.
// Its generic security strength is 224 bits against preimage attacks,
// and 112 bits against collision attacks.
func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} }
// New256 creates a new SHA3-256 hash.
// Its generic security strength is 256 bits against preimage attacks,
// and 128 bits against collision attacks.
func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} }
// New384 creates a new SHA3-384 hash.
// Its generic security strength is 384 bits against preimage attacks,
// and 192 bits against collision attacks.
func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} }
// New512 creates a new SHA3-512 hash.
// Its generic security strength is 512 bits against preimage attacks,
// and 256 bits against collision attacks.
func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} }
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
h := New224()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum256 returns the SHA3-256 digest of the data.
func Sum256(data []byte) (digest [32]byte) {
h := New256()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum384 returns the SHA3-384 digest of the data.
func Sum384(data []byte) (digest [48]byte) {
h := New384()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum512 returns the SHA3-512 digest of the data.
func Sum512(data []byte) (digest [64]byte) {
h := New512()
h.Write(data)
h.Sum(digest[:0])
return
}

View File

@ -1,412 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine gccgo
package sha3
// rc stores the round constants for use in the ι step.
var rc = [24]uint64{
0x0000000000000001,
0x0000000000008082,
0x800000000000808A,
0x8000000080008000,
0x000000000000808B,
0x0000000080000001,
0x8000000080008081,
0x8000000000008009,
0x000000000000008A,
0x0000000000000088,
0x0000000080008009,
0x000000008000000A,
0x000000008000808B,
0x800000000000008B,
0x8000000000008089,
0x8000000000008003,
0x8000000000008002,
0x8000000000000080,
0x000000000000800A,
0x800000008000000A,
0x8000000080008081,
0x8000000000008080,
0x0000000080000001,
0x8000000080008008,
}
// keccakF1600 applies the Keccak permutation to a 1600b-wide
// state represented as a slice of 25 uint64s.
func keccakF1600(a *[25]uint64) {
// Implementation translated from Keccak-inplace.c
// in the keccak reference code.
var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
for i := 0; i < 24; i += 4 {
// Combines the 5 steps in each round into 2 steps.
// Unrolls 4 rounds per loop and spreads some steps across rounds.
// Round 1
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[6] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[12] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[18] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[24] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
a[6] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[16] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[22] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[3] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[10] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[1] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[7] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[19] ^ d4
bc3 = t<<8 | t>>(64-8)
a[20] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[11] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[23] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[4] ^ d4
bc0 = t<<27 | t>>(64-27)
a[5] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[2] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[8] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[14] ^ d4
bc2 = t<<39 | t>>(64-39)
a[15] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
// Round 2
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[16] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[7] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[23] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[14] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
a[16] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[11] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[2] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[18] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[20] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[6] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[22] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[4] ^ d4
bc3 = t<<8 | t>>(64-8)
a[15] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[1] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[8] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[24] ^ d4
bc0 = t<<27 | t>>(64-27)
a[10] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[12] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[3] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[19] ^ d4
bc2 = t<<39 | t>>(64-39)
a[5] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
// Round 3
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[11] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[22] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[8] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[19] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
a[11] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[1] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[12] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[23] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[15] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[16] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[2] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[24] ^ d4
bc3 = t<<8 | t>>(64-8)
a[5] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[6] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[3] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[14] ^ d4
bc0 = t<<27 | t>>(64-27)
a[20] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[7] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[18] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[4] ^ d4
bc2 = t<<39 | t>>(64-39)
a[10] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
// Round 4
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[1] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[2] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[3] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[4] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
a[1] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[6] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[7] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[8] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[5] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[11] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[12] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[14] ^ d4
bc3 = t<<8 | t>>(64-8)
a[10] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[16] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[18] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[19] ^ d4
bc0 = t<<27 | t>>(64-27)
a[15] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[22] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[23] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[24] ^ d4
bc2 = t<<39 | t>>(64-39)
a[20] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
}
}

View File

@ -1,13 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
package sha3
// This function is implemented in keccakf_amd64.s.
//go:noescape
func keccakF1600(state *[25]uint64)

View File

@ -1,390 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
// This code was translated into a form compatible with 6a from the public
// domain sources at https://github.com/gvanas/KeccakCodePackage
// Offsets in state
#define _ba (0*8)
#define _be (1*8)
#define _bi (2*8)
#define _bo (3*8)
#define _bu (4*8)
#define _ga (5*8)
#define _ge (6*8)
#define _gi (7*8)
#define _go (8*8)
#define _gu (9*8)
#define _ka (10*8)
#define _ke (11*8)
#define _ki (12*8)
#define _ko (13*8)
#define _ku (14*8)
#define _ma (15*8)
#define _me (16*8)
#define _mi (17*8)
#define _mo (18*8)
#define _mu (19*8)
#define _sa (20*8)
#define _se (21*8)
#define _si (22*8)
#define _so (23*8)
#define _su (24*8)
// Temporary registers
#define rT1 AX
// Round vars
#define rpState DI
#define rpStack SP
#define rDa BX
#define rDe CX
#define rDi DX
#define rDo R8
#define rDu R9
#define rBa R10
#define rBe R11
#define rBi R12
#define rBo R13
#define rBu R14
#define rCa SI
#define rCe BP
#define rCi rBi
#define rCo rBo
#define rCu R15
#define MOVQ_RBI_RCE MOVQ rBi, rCe
#define XORQ_RT1_RCA XORQ rT1, rCa
#define XORQ_RT1_RCE XORQ rT1, rCe
#define XORQ_RBA_RCU XORQ rBa, rCu
#define XORQ_RBE_RCU XORQ rBe, rCu
#define XORQ_RDU_RCU XORQ rDu, rCu
#define XORQ_RDA_RCA XORQ rDa, rCa
#define XORQ_RDE_RCE XORQ rDe, rCe
#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
/* Prepare round */ \
MOVQ rCe, rDa; \
ROLQ $1, rDa; \
\
MOVQ _bi(iState), rCi; \
XORQ _gi(iState), rDi; \
XORQ rCu, rDa; \
XORQ _ki(iState), rCi; \
XORQ _mi(iState), rDi; \
XORQ rDi, rCi; \
\
MOVQ rCi, rDe; \
ROLQ $1, rDe; \
\
MOVQ _bo(iState), rCo; \
XORQ _go(iState), rDo; \
XORQ rCa, rDe; \
XORQ _ko(iState), rCo; \
XORQ _mo(iState), rDo; \
XORQ rDo, rCo; \
\
MOVQ rCo, rDi; \
ROLQ $1, rDi; \
\
MOVQ rCu, rDo; \
XORQ rCe, rDi; \
ROLQ $1, rDo; \
\
MOVQ rCa, rDu; \
XORQ rCi, rDo; \
ROLQ $1, rDu; \
\
/* Result b */ \
MOVQ _ba(iState), rBa; \
MOVQ _ge(iState), rBe; \
XORQ rCo, rDu; \
MOVQ _ki(iState), rBi; \
MOVQ _mo(iState), rBo; \
MOVQ _su(iState), rBu; \
XORQ rDe, rBe; \
ROLQ $44, rBe; \
XORQ rDi, rBi; \
XORQ rDa, rBa; \
ROLQ $43, rBi; \
\
MOVQ rBe, rCa; \
MOVQ rc, rT1; \
ORQ rBi, rCa; \
XORQ rBa, rT1; \
XORQ rT1, rCa; \
MOVQ rCa, _ba(oState); \
\
XORQ rDu, rBu; \
ROLQ $14, rBu; \
MOVQ rBa, rCu; \
ANDQ rBe, rCu; \
XORQ rBu, rCu; \
MOVQ rCu, _bu(oState); \
\
XORQ rDo, rBo; \
ROLQ $21, rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _bi(oState); \
\
NOTQ rBi; \
ORQ rBa, rBu; \
ORQ rBo, rBi; \
XORQ rBo, rBu; \
XORQ rBe, rBi; \
MOVQ rBu, _bo(oState); \
MOVQ rBi, _be(oState); \
B_RBI_RCE; \
\
/* Result g */ \
MOVQ _gu(iState), rBe; \
XORQ rDu, rBe; \
MOVQ _ka(iState), rBi; \
ROLQ $20, rBe; \
XORQ rDa, rBi; \
ROLQ $3, rBi; \
MOVQ _bo(iState), rBa; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDo, rBa; \
MOVQ _me(iState), rBo; \
MOVQ _si(iState), rBu; \
ROLQ $28, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ga(oState); \
G_RT1_RCA; \
\
XORQ rDe, rBo; \
ROLQ $45, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ge(oState); \
G_RT1_RCE; \
\
XORQ rDi, rBu; \
ROLQ $61, rBu; \
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _go(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _gu(oState); \
NOTQ rBu; \
G_RBA_RCU; \
\
ORQ rBu, rBo; \
XORQ rBi, rBo; \
MOVQ rBo, _gi(oState); \
\
/* Result k */ \
MOVQ _be(iState), rBa; \
MOVQ _gi(iState), rBe; \
MOVQ _ko(iState), rBi; \
MOVQ _mu(iState), rBo; \
MOVQ _sa(iState), rBu; \
XORQ rDi, rBe; \
ROLQ $6, rBe; \
XORQ rDo, rBi; \
ROLQ $25, rBi; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDe, rBa; \
ROLQ $1, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ka(oState); \
K_RT1_RCA; \
\
XORQ rDu, rBo; \
ROLQ $8, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ke(oState); \
K_RT1_RCE; \
\
XORQ rDa, rBu; \
ROLQ $18, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _ki(oState); \
\
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _ko(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _ku(oState); \
K_RBA_RCU; \
\
/* Result m */ \
MOVQ _ga(iState), rBe; \
XORQ rDa, rBe; \
MOVQ _ke(iState), rBi; \
ROLQ $36, rBe; \
XORQ rDe, rBi; \
MOVQ _bu(iState), rBa; \
ROLQ $10, rBi; \
MOVQ rBe, rT1; \
MOVQ _mi(iState), rBo; \
ANDQ rBi, rT1; \
XORQ rDu, rBa; \
MOVQ _so(iState), rBu; \
ROLQ $27, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ma(oState); \
M_RT1_RCA; \
\
XORQ rDi, rBo; \
ROLQ $15, rBo; \
MOVQ rBi, rT1; \
ORQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _me(oState); \
M_RT1_RCE; \
\
XORQ rDo, rBu; \
ROLQ $56, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ORQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _mi(oState); \
\
ORQ rBa, rBe; \
XORQ rBu, rBe; \
MOVQ rBe, _mu(oState); \
\
ANDQ rBa, rBu; \
XORQ rBo, rBu; \
MOVQ rBu, _mo(oState); \
M_RBE_RCU; \
\
/* Result s */ \
MOVQ _bi(iState), rBa; \
MOVQ _go(iState), rBe; \
MOVQ _ku(iState), rBi; \
XORQ rDi, rBa; \
MOVQ _ma(iState), rBo; \
ROLQ $62, rBa; \
XORQ rDo, rBe; \
MOVQ _se(iState), rBu; \
ROLQ $55, rBe; \
\
XORQ rDu, rBi; \
MOVQ rBa, rDu; \
XORQ rDe, rBu; \
ROLQ $2, rBu; \
ANDQ rBe, rDu; \
XORQ rBu, rDu; \
MOVQ rDu, _su(oState); \
\
ROLQ $39, rBi; \
S_RDU_RCU; \
NOTQ rBe; \
XORQ rDa, rBo; \
MOVQ rBe, rDa; \
ANDQ rBi, rDa; \
XORQ rBa, rDa; \
MOVQ rDa, _sa(oState); \
S_RDA_RCA; \
\
ROLQ $41, rBo; \
MOVQ rBi, rDe; \
ORQ rBo, rDe; \
XORQ rBe, rDe; \
MOVQ rDe, _se(oState); \
S_RDE_RCE; \
\
MOVQ rBo, rDi; \
MOVQ rBu, rDo; \
ANDQ rBu, rDi; \
ORQ rBa, rDo; \
XORQ rBi, rDi; \
XORQ rBo, rDo; \
MOVQ rDi, _si(oState); \
MOVQ rDo, _so(oState) \
// func keccakF1600(state *[25]uint64)
TEXT ·keccakF1600(SB), 0, $200-8
MOVQ state+0(FP), rpState
// Convert the user state into an internal state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
// Execute the KeccakF permutation
MOVQ _ba(rpState), rCa
MOVQ _be(rpState), rCe
MOVQ _bu(rpState), rCu
XORQ _ga(rpState), rCa
XORQ _ge(rpState), rCe
XORQ _gu(rpState), rCu
XORQ _ka(rpState), rCa
XORQ _ke(rpState), rCe
XORQ _ku(rpState), rCu
XORQ _ma(rpState), rCa
XORQ _me(rpState), rCe
XORQ _mu(rpState), rCu
XORQ _sa(rpState), rCa
XORQ _se(rpState), rCe
MOVQ _si(rpState), rDi
MOVQ _so(rpState), rDo
XORQ _su(rpState), rCu
mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
// Revert the internal state to the user state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
RET

View File

@ -1,18 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.4
package sha3
import (
"crypto"
)
func init() {
crypto.RegisterHash(crypto.SHA3_224, New224)
crypto.RegisterHash(crypto.SHA3_256, New256)
crypto.RegisterHash(crypto.SHA3_384, New384)
crypto.RegisterHash(crypto.SHA3_512, New512)
}

View File

@ -1,192 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// spongeDirection indicates the direction bytes are flowing through the sponge.
type spongeDirection int
const (
// spongeAbsorbing indicates that the sponge is absorbing input.
spongeAbsorbing spongeDirection = iota
// spongeSqueezing indicates that the sponge is being squeezed.
spongeSqueezing
)
const (
// maxRate is the maximum size of the internal buffer. SHAKE-256
// currently needs the largest buffer.
maxRate = 168
)
type state struct {
// Generic sponge components.
a [25]uint64 // main state of the hash
buf []byte // points into storage
rate int // the number of bytes of state to use
// dsbyte contains the "domain separation" bits and the first bit of
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
// SHA-3 and SHAKE functions by appending bitstrings to the message.
// Using a little-endian bit-ordering convention, these are "01" for SHA-3
// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
// padding rule from section 5.1 is applied to pad the message to a multiple
// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
// giving 00000110b (0x06) and 00011111b (0x1f).
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
// "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
// Extendable-Output Functions (May 2014)"
dsbyte byte
storage [maxRate]byte
// Specific to SHA-3 and SHAKE.
outputLen int // the default output size in bytes
state spongeDirection // whether the sponge is absorbing or squeezing
}
// BlockSize returns the rate of sponge underlying this hash function.
func (d *state) BlockSize() int { return d.rate }
// Size returns the output size of the hash function in bytes.
func (d *state) Size() int { return d.outputLen }
// Reset clears the internal state by zeroing the sponge state and
// the byte buffer, and setting Sponge.state to absorbing.
func (d *state) Reset() {
// Zero the permutation's state.
for i := range d.a {
d.a[i] = 0
}
d.state = spongeAbsorbing
d.buf = d.storage[:0]
}
func (d *state) clone() *state {
ret := *d
if ret.state == spongeAbsorbing {
ret.buf = ret.storage[:len(ret.buf)]
} else {
ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate]
}
return &ret
}
// permute applies the KeccakF-1600 permutation. It handles
// any input-output buffering.
func (d *state) permute() {
switch d.state {
case spongeAbsorbing:
// If we're absorbing, we need to xor the input into the state
// before applying the permutation.
xorIn(d, d.buf)
d.buf = d.storage[:0]
keccakF1600(&d.a)
case spongeSqueezing:
// If we're squeezing, we need to apply the permutation before
// copying more output.
keccakF1600(&d.a)
d.buf = d.storage[:d.rate]
copyOut(d, d.buf)
}
}
// padAndPermute appends the domain separation bits in dsbyte, applies
// the multi-bitrate 10..1 padding rule, and permutes the state.
func (d *state) padAndPermute(dsbyte byte) {
if d.buf == nil {
d.buf = d.storage[:0]
}
// Pad with this instance's domain-separator bits. We know that there's
// at least one byte of space in d.buf because, if it were full,
// permute would have been called to empty it. dsbyte also contains the
// first one bit for the padding. See the comment in the state struct.
d.buf = append(d.buf, dsbyte)
zerosStart := len(d.buf)
d.buf = d.storage[:d.rate]
for i := zerosStart; i < d.rate; i++ {
d.buf[i] = 0
}
// This adds the final one bit for the padding. Because of the way that
// bits are numbered from the LSB upwards, the final bit is the MSB of
// the last byte.
d.buf[d.rate-1] ^= 0x80
// Apply the permutation
d.permute()
d.state = spongeSqueezing
d.buf = d.storage[:d.rate]
copyOut(d, d.buf)
}
// Write absorbs more data into the hash's state. It panics if more data
// is written after output has been read from it.
func (d *state) Write(p []byte) (written int, err error) {
if d.state != spongeAbsorbing {
panic("sha3: write to sponge after read")
}
if d.buf == nil {
d.buf = d.storage[:0]
}
written = len(p)
for len(p) > 0 {
if len(d.buf) == 0 && len(p) >= d.rate {
// The fast path; absorb a full "rate" bytes of input and apply the permutation.
xorIn(d, p[:d.rate])
p = p[d.rate:]
keccakF1600(&d.a)
} else {
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
todo := d.rate - len(d.buf)
if todo > len(p) {
todo = len(p)
}
d.buf = append(d.buf, p[:todo]...)
p = p[todo:]
// If the sponge is full, apply the permutation.
if len(d.buf) == d.rate {
d.permute()
}
}
}
return
}
// Read squeezes an arbitrary number of bytes from the sponge.
func (d *state) Read(out []byte) (n int, err error) {
// If we're still absorbing, pad and apply the permutation.
if d.state == spongeAbsorbing {
d.padAndPermute(d.dsbyte)
}
n = len(out)
// Now, do the squeezing.
for len(out) > 0 {
n := copy(out, d.buf)
d.buf = d.buf[n:]
out = out[n:]
// Apply the permutation if we've squeezed the sponge dry.
if len(d.buf) == 0 {
d.permute()
}
}
return
}
// Sum applies padding to the hash state and then squeezes out the desired
// number of output bytes.
func (d *state) Sum(in []byte) []byte {
// Make a copy of the original hash so that caller can keep writing
// and summing.
dup := d.clone()
hash := make([]byte, dup.outputLen)
dup.Read(hash)
return append(in, hash...)
}

View File

@ -1,60 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file defines the ShakeHash interface, and provides
// functions for creating SHAKE instances, as well as utility
// functions for hashing bytes to arbitrary-length output.
import (
"io"
)
// ShakeHash defines the interface to hash functions that
// support arbitrary-length output.
type ShakeHash interface {
// Write absorbs more data into the hash's state. It panics if input is
// written to it after output has been read from it.
io.Writer
// Read reads more output from the hash; reading affects the hash's
// state. (ShakeHash.Read is thus very different from Hash.Sum)
// It never returns an error.
io.Reader
// Clone returns a copy of the ShakeHash in its current state.
Clone() ShakeHash
// Reset resets the ShakeHash to its initial state.
Reset()
}
func (d *state) Clone() ShakeHash {
return d.clone()
}
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 128 bits against all attacks if at
// least 32 bytes of its output are used.
func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} }
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
// Its generic security strength is 256 bits against all attacks if
// at least 64 bytes of its output are used.
func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} }
// ShakeSum128 writes an arbitrary-length digest of data into hash.
func ShakeSum128(hash, data []byte) {
h := NewShake128()
h.Write(data)
h.Read(hash)
}
// ShakeSum256 writes an arbitrary-length digest of data into hash.
func ShakeSum256(hash, data []byte) {
h := NewShake256()
h.Write(data)
h.Read(hash)
}

View File

@ -1,16 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!386,!ppc64le appengine
package sha3
var (
xorIn = xorInGeneric
copyOut = copyOutGeneric
xorInUnaligned = xorInGeneric
copyOutUnaligned = copyOutGeneric
)
const xorImplementationUnaligned = "generic"

View File

@ -1,28 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
import "encoding/binary"
// xorInGeneric xors the bytes in buf into the state; it
// makes no non-portable assumptions about memory layout
// or alignment.
func xorInGeneric(d *state, buf []byte) {
n := len(buf) / 8
for i := 0; i < n; i++ {
a := binary.LittleEndian.Uint64(buf)
d.a[i] ^= a
buf = buf[8:]
}
}
// copyOutGeneric copies uint64s to a byte buffer.
func copyOutGeneric(d *state, b []byte) {
for i := 0; len(b) >= 8; i++ {
binary.LittleEndian.PutUint64(b, d.a[i])
b = b[8:]
}
}

View File

@ -1,58 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 386 ppc64le
// +build !appengine
package sha3
import "unsafe"
func xorInUnaligned(d *state, buf []byte) {
bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))
n := len(buf)
if n >= 72 {
d.a[0] ^= bw[0]
d.a[1] ^= bw[1]
d.a[2] ^= bw[2]
d.a[3] ^= bw[3]
d.a[4] ^= bw[4]
d.a[5] ^= bw[5]
d.a[6] ^= bw[6]
d.a[7] ^= bw[7]
d.a[8] ^= bw[8]
}
if n >= 104 {
d.a[9] ^= bw[9]
d.a[10] ^= bw[10]
d.a[11] ^= bw[11]
d.a[12] ^= bw[12]
}
if n >= 136 {
d.a[13] ^= bw[13]
d.a[14] ^= bw[14]
d.a[15] ^= bw[15]
d.a[16] ^= bw[16]
}
if n >= 144 {
d.a[17] ^= bw[17]
}
if n >= 168 {
d.a[18] ^= bw[18]
d.a[19] ^= bw[19]
d.a[20] ^= bw[20]
}
}
func copyOutUnaligned(d *state, buf []byte) {
ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
copy(buf, ab[:])
}
var (
xorIn = xorInUnaligned
copyOut = copyOutUnaligned
)
const xorImplementationUnaligned = "unaligned"

View File

@ -214,7 +214,8 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err)
break
}
task.statedb.Finalise(true)
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
task.statedb.Finalise(api.eth.blockchain.Config().IsEIP158(task.block.Number()))
task.results[i] = &txTraceResult{Result: res}
}
// Stream the result back to the user or abort on teardown
@ -506,7 +507,8 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
break
}
// Finalize the state so any modifications are written to the trie
statedb.Finalise(true)
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
}
close(jobs)
pend.Wait()
@ -608,7 +610,8 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block
return dumps, err
}
// Finalize the state so any modifications are written to the trie
statedb.Finalise(true)
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
// If we've traced the transaction we were looking for, abort
if tx.Hash() == txHash {
@ -799,7 +802,8 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree
return nil, vm.Context{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
statedb.Finalise(true)
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
}
return nil, vm.Context{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, blockHash)
}
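All four hunks in this file apply the same guard; condensed into a sketch (finaliseForBlock is a hypothetical helper, the types follow the surrounding code):

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/params"
)

// finaliseForBlock writes pending state changes to the trie, deleting
// empty accounts only when EIP-158/161 (Spurious Dragon) is active at
// the given block height.
func finaliseForBlock(statedb *state.StateDB, cfg *params.ChainConfig, blockNumber *big.Int) {
	statedb.Finalise(cfg.IsEIP158(blockNumber))
}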

View File

@ -143,8 +143,10 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if !config.SkipBcVersionCheck {
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
if bcVersion != core.BlockChainVersion && bcVersion != 0 {
return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d).\n", bcVersion, core.BlockChainVersion)
if bcVersion != nil && *bcVersion > core.BlockChainVersion {
return nil, fmt.Errorf("database version is v%d, Geth %s only supports v%d", *bcVersion, params.VersionWithMeta, core.BlockChainVersion)
} else if bcVersion != nil && *bcVersion < core.BlockChainVersion {
log.Warn("Upgrade blockchain database version", "from", *bcVersion, "to", core.BlockChainVersion)
}
rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
}

View File

@ -1504,7 +1504,15 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
}
if index, err := d.blockchain.InsertChain(blocks); err != nil {
log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
if index < len(results) {
log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
} else {
// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
// when it needs to preprocess blocks to import a sidechain.
// The importer will put together a new list of blocks to import, which is a superset
// of the blocks delivered from the downloader, and the indexing will be off.
log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
}
return errInvalidChain
}
return nil

View File

@ -25,10 +25,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
)
// stateReq represents a batch of state fetch requests grouped together into
@ -152,7 +152,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
finished = append(finished, req)
delete(active, pack.PeerId())
// Handle dropped peer connections:
// Handle dropped peer connections:
case p := <-peerDrop:
// Skip if no request is currently pending
req := active[p.id]
@ -240,7 +240,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
return &stateSync{
d: d,
sched: state.NewStateSync(root, d.stateDB),
keccak: sha3.NewKeccak256(),
keccak: sha3.NewLegacyKeccak256(),
tasks: make(map[common.Hash]*stateTask),
deliver: make(chan *stateReq),
cancel: make(chan struct{}),
@ -398,9 +398,8 @@ func (s *stateSync) fillTasks(n int, req *stateReq) {
// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
// delivered.
// Returns whether the peer actually managed to deliver anything of value,
// and any error that occurred
// delivered. Returns whether the peer actually managed to deliver anything of
// value, and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
// Collect processing stats and update progress if valid data was received
duplicate, unexpected, successful := 0, 0, 0
@ -412,14 +411,12 @@ func (s *stateSync) process(req *stateReq) (int, error) {
}(time.Now())
// Iterate over all the delivered data and inject one-by-one into the trie
progress := false
for _, blob := range req.response {
prog, hash, err := s.processNodeData(blob)
_, hash, err := s.processNodeData(blob)
switch err {
case nil:
s.numUncommitted++
s.bytesUncommitted += len(blob)
progress = progress || prog
successful++
case trie.ErrNotRequested:
unexpected++

File diff suppressed because one or more lines are too long

View File

@ -38,7 +38,7 @@
var op = log.op.toString();
}
// If a new contract is being created, add to the call stack
if (syscall && op == 'CREATE') {
if (syscall && (op == 'CREATE' || op == "CREATE2")) {
var inOff = log.stack.peek(1).valueOf();
var inEnd = inOff + log.stack.peek(2).valueOf();
@ -116,7 +116,7 @@
// Pop off the last call and get the execution results
var call = this.callstack.pop();
if (call.type == 'CREATE') {
if (call.type == 'CREATE' || call.type == "CREATE2") {
// If the call was a CREATE, retrieve the contract address and output code
call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost - log.getGas()).toString(16);
delete call.gasIn; delete call.gasCost;

View File

@ -86,6 +86,14 @@
var from = log.contract.getAddress();
this.lookupAccount(toContract(from, db.getNonce(from)), db);
break;
case "CREATE2":
var from = log.contract.getAddress();
// stack: salt, size, offset, endowment
var offset = log.stack.peek(1).valueOf()
var size = log.stack.peek(2).valueOf()
var end = offset + size
this.lookupAccount(toContract2(from, log.stack.peek(3).toString(16), log.memory.slice(offset, end)), db);
break;
case "CALL": case "CALLCODE": case "DELEGATECALL": case "STATICCALL":
this.lookupAccount(toAddress(log.stack.peek(1).toString(16)), db);
break;

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore ((tracers)|(assets)).go ./...
//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore tracers.go -ignore assets.go ./...
//go:generate gofmt -s -w assets.go
// Package tracers contains the actual JavaScript tracer assets.

View File

@ -367,6 +367,28 @@ func New(code string) (*Tracer, error) {
copy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])
return 1
})
tracer.vm.PushGlobalGoFunction("toContract2", func(ctx *duktape.Context) int {
var from common.Address
if ptr, size := ctx.GetBuffer(-3); ptr != nil {
from = common.BytesToAddress(makeSlice(ptr, size))
} else {
from = common.HexToAddress(ctx.GetString(-3))
}
// Retrieve salt hex string from js stack
salt := common.HexToHash(ctx.GetString(-2))
// Retrieve code slice from js stack
var code []byte
if ptr, size := ctx.GetBuffer(-1); ptr != nil {
code = common.CopyBytes(makeSlice(ptr, size))
} else {
code = common.FromHex(ctx.GetString(-1))
}
codeHash := crypto.Keccak256(code)
ctx.Pop3()
contract := crypto.CreateAddress2(from, salt, codeHash)
copy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])
return 1
})
tracer.vm.PushGlobalGoFunction("isPrecompiled", func(ctx *duktape.Context) int {
_, ok := vm.PrecompiledContractsByzantium[common.BytesToAddress(popSlice(ctx))]
ctx.PushBoolean(ok)
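toContract2 implements EIP-1014 (CREATE2) address derivation; the same computation as a standalone sketch, with illustrative inputs:

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func create2Example() {
	from := common.HexToAddress("0x00000000000000000000000000000000deadbeef") // illustrative deployer
	salt := common.HexToHash("0x01")                                          // illustrative salt
	initCode := common.FromHex("600060005560016000f3")                        // illustrative init code

	// EIP-1014: address = keccak256(0xff ++ deployer ++ salt ++ keccak256(init_code))[12:]
	addr := crypto.CreateAddress2(from, salt, crypto.Keccak256(initCode))
	fmt.Println(addr.Hex())
}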

View File

@ -0,0 +1,527 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package ethclient provides a client for the Ethereum RPC API.
package ethclient
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
)
// Client defines typed wrappers for the Ethereum RPC API.
type Client struct {
c *rpc.Client
}
// Dial connects a client to the given URL.
func Dial(rawurl string) (*Client, error) {
return DialContext(context.Background(), rawurl)
}
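// DialContext connects a client to the given URL using the supplied context.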
func DialContext(ctx context.Context, rawurl string) (*Client, error) {
c, err := rpc.DialContext(ctx, rawurl)
if err != nil {
return nil, err
}
return NewClient(c), nil
}
// NewClient creates a client that uses the given RPC client.
func NewClient(c *rpc.Client) *Client {
return &Client{c}
}
func (ec *Client) Close() {
ec.c.Close()
}
// Blockchain Access
// BlockByHash returns the given full block.
//
// Note that loading full blocks requires two requests. Use HeaderByHash
// if you don't need all transactions or uncle headers.
func (ec *Client) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
return ec.getBlock(ctx, "eth_getBlockByHash", hash, true)
}
// BlockByNumber returns a block from the current canonical chain. If number is nil, the
// latest known block is returned.
//
// Note that loading full blocks requires two requests. Use HeaderByNumber
// if you don't need all transactions or uncle headers.
func (ec *Client) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
return ec.getBlock(ctx, "eth_getBlockByNumber", toBlockNumArg(number), true)
}
type rpcBlock struct {
Hash common.Hash `json:"hash"`
Transactions []rpcTransaction `json:"transactions"`
UncleHashes []common.Hash `json:"uncles"`
}
func (ec *Client) getBlock(ctx context.Context, method string, args ...interface{}) (*types.Block, error) {
var raw json.RawMessage
err := ec.c.CallContext(ctx, &raw, method, args...)
if err != nil {
return nil, err
} else if len(raw) == 0 {
return nil, ethereum.NotFound
}
// Decode header and transactions.
var head *types.Header
var body rpcBlock
if err := json.Unmarshal(raw, &head); err != nil {
return nil, err
}
if err := json.Unmarshal(raw, &body); err != nil {
return nil, err
}
// Quick-verify transaction and uncle lists. This mostly helps with debugging the server.
if head.UncleHash == types.EmptyUncleHash && len(body.UncleHashes) > 0 {
return nil, fmt.Errorf("server returned non-empty uncle list but block header indicates no uncles")
}
if head.UncleHash != types.EmptyUncleHash && len(body.UncleHashes) == 0 {
return nil, fmt.Errorf("server returned empty uncle list but block header indicates uncles")
}
if head.TxHash == types.EmptyRootHash && len(body.Transactions) > 0 {
return nil, fmt.Errorf("server returned non-empty transaction list but block header indicates no transactions")
}
if head.TxHash != types.EmptyRootHash && len(body.Transactions) == 0 {
return nil, fmt.Errorf("server returned empty transaction list but block header indicates transactions")
}
// Load uncles because they are not included in the block response.
var uncles []*types.Header
if len(body.UncleHashes) > 0 {
uncles = make([]*types.Header, len(body.UncleHashes))
reqs := make([]rpc.BatchElem, len(body.UncleHashes))
for i := range reqs {
reqs[i] = rpc.BatchElem{
Method: "eth_getUncleByBlockHashAndIndex",
Args: []interface{}{body.Hash, hexutil.EncodeUint64(uint64(i))},
Result: &uncles[i],
}
}
if err := ec.c.BatchCallContext(ctx, reqs); err != nil {
return nil, err
}
for i := range reqs {
if reqs[i].Error != nil {
return nil, reqs[i].Error
}
if uncles[i] == nil {
return nil, fmt.Errorf("got null header for uncle %d of block %x", i, body.Hash[:])
}
}
}
// Fill the sender cache of transactions in the block.
txs := make([]*types.Transaction, len(body.Transactions))
for i, tx := range body.Transactions {
if tx.From != nil {
setSenderFromServer(tx.tx, *tx.From, body.Hash)
}
txs[i] = tx.tx
}
return types.NewBlockWithHeader(head).WithBody(txs, uncles), nil
}
// HeaderByHash returns the block header with the given hash.
func (ec *Client) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
var head *types.Header
err := ec.c.CallContext(ctx, &head, "eth_getBlockByHash", hash, false)
if err == nil && head == nil {
err = ethereum.NotFound
}
return head, err
}
// HeaderByNumber returns a block header from the current canonical chain. If number is
// nil, the latest known header is returned.
func (ec *Client) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
var head *types.Header
err := ec.c.CallContext(ctx, &head, "eth_getBlockByNumber", toBlockNumArg(number), false)
if err == nil && head == nil {
err = ethereum.NotFound
}
return head, err
}
type rpcTransaction struct {
tx *types.Transaction
txExtraInfo
}
type txExtraInfo struct {
BlockNumber *string `json:"blockNumber,omitempty"`
BlockHash *common.Hash `json:"blockHash,omitempty"`
From *common.Address `json:"from,omitempty"`
}
func (tx *rpcTransaction) UnmarshalJSON(msg []byte) error {
if err := json.Unmarshal(msg, &tx.tx); err != nil {
return err
}
return json.Unmarshal(msg, &tx.txExtraInfo)
}
// TransactionByHash returns the transaction with the given hash.
func (ec *Client) TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error) {
var json *rpcTransaction
err = ec.c.CallContext(ctx, &json, "eth_getTransactionByHash", hash)
if err != nil {
return nil, false, err
} else if json == nil {
return nil, false, ethereum.NotFound
} else if _, r, _ := json.tx.RawSignatureValues(); r == nil {
return nil, false, fmt.Errorf("server returned transaction without signature")
}
if json.From != nil && json.BlockHash != nil {
setSenderFromServer(json.tx, *json.From, *json.BlockHash)
}
return json.tx, json.BlockNumber == nil, nil
}
// TransactionSender returns the sender address of the given transaction. The transaction
// must be known to the remote node and included in the blockchain at the given block and
// index. The sender is the one derived by the protocol at the time of inclusion.
//
// There is a fast-path for transactions retrieved by TransactionByHash and
// TransactionInBlock. Getting their sender address can be done without an RPC interaction.
func (ec *Client) TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) {
// Try to load the address from the cache.
sender, err := types.Sender(&senderFromServer{blockhash: block}, tx)
if err == nil {
return sender, nil
}
var meta struct {
Hash common.Hash
From common.Address
}
if err = ec.c.CallContext(ctx, &meta, "eth_getTransactionByBlockHashAndIndex", block, hexutil.Uint64(index)); err != nil {
return common.Address{}, err
}
if meta.Hash == (common.Hash{}) || meta.Hash != tx.Hash() {
return common.Address{}, errors.New("wrong inclusion block/index")
}
return meta.From, nil
}
// TransactionCount returns the total number of transactions in the given block.
func (ec *Client) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) {
var num hexutil.Uint
err := ec.c.CallContext(ctx, &num, "eth_getBlockTransactionCountByHash", blockHash)
return uint(num), err
}
// TransactionInBlock returns a single transaction at index in the given block.
func (ec *Client) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {
var json *rpcTransaction
err := ec.c.CallContext(ctx, &json, "eth_getTransactionByBlockHashAndIndex", blockHash, hexutil.Uint64(index))
if err != nil {
return nil, err
}
// Return early on error so the dereferences below never see a nil json.
if json == nil {
return nil, ethereum.NotFound
} else if _, r, _ := json.tx.RawSignatureValues(); r == nil {
return nil, fmt.Errorf("server returned transaction without signature")
}
if json.From != nil && json.BlockHash != nil {
setSenderFromServer(json.tx, *json.From, *json.BlockHash)
}
return json.tx, err
}
// TransactionReceipt returns the receipt of a transaction by transaction hash.
// Note that the receipt is not available for pending transactions.
func (ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
var r *types.Receipt
err := ec.c.CallContext(ctx, &r, "eth_getTransactionReceipt", txHash)
if err == nil {
if r == nil {
return nil, ethereum.NotFound
}
}
return r, err
}
func toBlockNumArg(number *big.Int) string {
if number == nil {
return "latest"
}
return hexutil.EncodeBig(number)
}
type rpcProgress struct {
StartingBlock hexutil.Uint64
CurrentBlock hexutil.Uint64
HighestBlock hexutil.Uint64
PulledStates hexutil.Uint64
KnownStates hexutil.Uint64
}
// SyncProgress retrieves the current progress of the sync algorithm. If there's
// no sync currently running, it returns nil.
func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) {
var raw json.RawMessage
if err := ec.c.CallContext(ctx, &raw, "eth_syncing"); err != nil {
return nil, err
}
// Handle the possible response types
var syncing bool
if err := json.Unmarshal(raw, &syncing); err == nil {
return nil, nil // Not syncing (always false)
}
var progress *rpcProgress
if err := json.Unmarshal(raw, &progress); err != nil {
return nil, err
}
return &ethereum.SyncProgress{
StartingBlock: uint64(progress.StartingBlock),
CurrentBlock: uint64(progress.CurrentBlock),
HighestBlock: uint64(progress.HighestBlock),
PulledStates: uint64(progress.PulledStates),
KnownStates: uint64(progress.KnownStates),
}, nil
}
// SubscribeNewHead subscribes to notifications about the current blockchain head
// on the given channel.
func (ec *Client) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
return ec.c.EthSubscribe(ctx, ch, "newHeads")
}
// State Access
// NetworkID returns the network ID (also known as the chain ID) for this chain.
func (ec *Client) NetworkID(ctx context.Context) (*big.Int, error) {
version := new(big.Int)
var ver string
if err := ec.c.CallContext(ctx, &ver, "net_version"); err != nil {
return nil, err
}
if _, ok := version.SetString(ver, 10); !ok {
return nil, fmt.Errorf("invalid net_version result %q", ver)
}
return version, nil
}
// BalanceAt returns the wei balance of the given account.
// The block number can be nil, in which case the balance is taken from the latest known block.
func (ec *Client) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) {
var result hexutil.Big
err := ec.c.CallContext(ctx, &result, "eth_getBalance", account, toBlockNumArg(blockNumber))
return (*big.Int)(&result), err
}
// StorageAt returns the value of key in the contract storage of the given account.
// The block number can be nil, in which case the value is taken from the latest known block.
func (ec *Client) StorageAt(ctx context.Context, account common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) {
var result hexutil.Bytes
err := ec.c.CallContext(ctx, &result, "eth_getStorageAt", account, key, toBlockNumArg(blockNumber))
return result, err
}
// CodeAt returns the contract code of the given account.
// The block number can be nil, in which case the code is taken from the latest known block.
func (ec *Client) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
var result hexutil.Bytes
err := ec.c.CallContext(ctx, &result, "eth_getCode", account, toBlockNumArg(blockNumber))
return result, err
}
// NonceAt returns the account nonce of the given account.
// The block number can be nil, in which case the nonce is taken from the latest known block.
func (ec *Client) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) {
var result hexutil.Uint64
err := ec.c.CallContext(ctx, &result, "eth_getTransactionCount", account, toBlockNumArg(blockNumber))
return uint64(result), err
}
// Filters
// FilterLogs executes a filter query.
func (ec *Client) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
var result []types.Log
arg, err := toFilterArg(q)
if err != nil {
return nil, err
}
err = ec.c.CallContext(ctx, &result, "eth_getLogs", arg)
return result, err
}
// SubscribeFilterLogs subscribes to the results of a streaming filter query.
func (ec *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
arg, err := toFilterArg(q)
if err != nil {
return nil, err
}
return ec.c.EthSubscribe(ctx, ch, "logs", arg)
}
func toFilterArg(q ethereum.FilterQuery) (interface{}, error) {
arg := map[string]interface{}{
"address": q.Addresses,
"topics": q.Topics,
}
if q.BlockHash != nil {
arg["blockHash"] = *q.BlockHash
if q.FromBlock != nil || q.ToBlock != nil {
return nil, fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock")
}
} else {
if q.FromBlock == nil {
arg["fromBlock"] = "0x0"
} else {
arg["fromBlock"] = toBlockNumArg(q.FromBlock)
}
arg["toBlock"] = toBlockNumArg(q.ToBlock)
}
return arg, nil
}
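A usage sketch for the filter helpers above; the endpoint, block range, and address are placeholders:

import (
	"context"
	"log"
	"math/big"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func filterExample() {
	client, err := ethclient.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	q := ethereum.FilterQuery{
		FromBlock: big.NewInt(1000000),
		ToBlock:   big.NewInt(1000100),
		Addresses: []common.Address{common.HexToAddress("0x00000000000000000000000000000000deadbeef")},
	}
	logs, err := client.FilterLogs(context.Background(), q)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d logs", len(logs))
}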
// Pending State
// PendingBalanceAt returns the wei balance of the given account in the pending state.
func (ec *Client) PendingBalanceAt(ctx context.Context, account common.Address) (*big.Int, error) {
var result hexutil.Big
err := ec.c.CallContext(ctx, &result, "eth_getBalance", account, "pending")
return (*big.Int)(&result), err
}
// PendingStorageAt returns the value of key in the contract storage of the given account in the pending state.
func (ec *Client) PendingStorageAt(ctx context.Context, account common.Address, key common.Hash) ([]byte, error) {
var result hexutil.Bytes
err := ec.c.CallContext(ctx, &result, "eth_getStorageAt", account, key, "pending")
return result, err
}
// PendingCodeAt returns the contract code of the given account in the pending state.
func (ec *Client) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) {
var result hexutil.Bytes
err := ec.c.CallContext(ctx, &result, "eth_getCode", account, "pending")
return result, err
}
// PendingNonceAt returns the account nonce of the given account in the pending state.
// This is the nonce that should be used for the next transaction.
func (ec *Client) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
var result hexutil.Uint64
err := ec.c.CallContext(ctx, &result, "eth_getTransactionCount", account, "pending")
return uint64(result), err
}
// PendingTransactionCount returns the total number of transactions in the pending state.
func (ec *Client) PendingTransactionCount(ctx context.Context) (uint, error) {
var num hexutil.Uint
err := ec.c.CallContext(ctx, &num, "eth_getBlockTransactionCountByNumber", "pending")
return uint(num), err
}
// TODO: SubscribePendingTransactions (needs server side)
// Contract Calling
// CallContract executes a message call transaction, which is directly executed in the VM
// of the node, but never mined into the blockchain.
//
// blockNumber selects the block height at which the call runs. It can be nil, in which
// case the code is taken from the latest known block. Note that state from very old
// blocks might not be available.
func (ec *Client) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
var hex hexutil.Bytes
err := ec.c.CallContext(ctx, &hex, "eth_call", toCallArg(msg), toBlockNumArg(blockNumber))
if err != nil {
return nil, err
}
return hex, nil
}
// PendingCallContract executes a message call transaction using the EVM.
// The state seen by the contract call is the pending state.
func (ec *Client) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) {
var hex hexutil.Bytes
err := ec.c.CallContext(ctx, &hex, "eth_call", toCallArg(msg), "pending")
if err != nil {
return nil, err
}
return hex, nil
}
// SuggestGasPrice retrieves the currently suggested gas price to allow a timely
// execution of a transaction.
func (ec *Client) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
var hex hexutil.Big
if err := ec.c.CallContext(ctx, &hex, "eth_gasPrice"); err != nil {
return nil, err
}
return (*big.Int)(&hex), nil
}
// EstimateGas tries to estimate the gas needed to execute a specific transaction based on
// the current pending state of the backend blockchain. There is no guarantee that this is
// the true gas limit requirement as other transactions may be added or removed by miners,
// but it should provide a basis for setting a reasonable default.
func (ec *Client) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) {
var hex hexutil.Uint64
err := ec.c.CallContext(ctx, &hex, "eth_estimateGas", toCallArg(msg))
if err != nil {
return 0, err
}
return uint64(hex), nil
}
// SendTransaction injects a signed transaction into the pending pool for execution.
//
// If the transaction was a contract creation use the TransactionReceipt method to get the
// contract address after the transaction has been mined.
func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) error {
data, err := rlp.EncodeToBytes(tx)
if err != nil {
return err
}
return ec.c.CallContext(ctx, nil, "eth_sendRawTransaction", common.ToHex(data))
}
func toCallArg(msg ethereum.CallMsg) interface{} {
arg := map[string]interface{}{
"from": msg.From,
"to": msg.To,
}
if len(msg.Data) > 0 {
arg["data"] = hexutil.Bytes(msg.Data)
}
if msg.Value != nil {
arg["value"] = (*hexutil.Big)(msg.Value)
}
if msg.Gas != 0 {
arg["gas"] = hexutil.Uint64(msg.Gas)
}
if msg.GasPrice != nil {
arg["gasPrice"] = (*hexutil.Big)(msg.GasPrice)
}
return arg
}
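Taken together, a minimal end-to-end sketch of the client added in this file; the endpoint URL and account address are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	ctx := context.Background()
	client, err := ethclient.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Header-only fetch: cheaper than BlockByNumber when transactions
	// and uncles aren't needed. A nil number means "latest".
	head, err := client.HeaderByNumber(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("head block:", head.Number)

	// Balance at the latest block (nil block number).
	addr := common.HexToAddress("0x00000000000000000000000000000000deadbeef") // placeholder
	balance, err := client.BalanceAt(ctx, addr, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("balance (wei):", balance)
}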

View File

@ -0,0 +1,59 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethclient
import (
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// senderFromServer is a types.Signer that remembers the sender address returned by the RPC
// server. It is stored in the transaction's sender address cache to avoid an additional
// request in TransactionSender.
type senderFromServer struct {
addr common.Address
blockhash common.Hash
}
var errNotCached = errors.New("sender not cached")
func setSenderFromServer(tx *types.Transaction, addr common.Address, block common.Hash) {
// Use types.Sender for side-effect to store our signer into the cache.
types.Sender(&senderFromServer{addr, block}, tx)
}
func (s *senderFromServer) Equal(other types.Signer) bool {
os, ok := other.(*senderFromServer)
return ok && os.blockhash == s.blockhash
}
func (s *senderFromServer) Sender(tx *types.Transaction) (common.Address, error) {
if s.blockhash == (common.Hash{}) {
return common.Address{}, errNotCached
}
return s.addr, nil
}
func (s *senderFromServer) Hash(tx *types.Transaction) common.Hash {
panic("can't sign with senderFromServer")
}
func (s *senderFromServer) SignatureValues(tx *types.Transaction, sig []byte) (R, S, V *big.Int, err error) {
panic("can't sign with senderFromServer")
}
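The practical effect of the cache, sketched: a transaction fetched via TransactionInBlock already carries its server-reported sender, so a follow-up TransactionSender call resolves locally instead of issuing another request (client and blockHash are assumed to come from the caller):

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func senderExample(client *ethclient.Client, blockHash common.Hash) {
	ctx := context.Background()
	tx, err := client.TransactionInBlock(ctx, blockHash, 0)
	if err != nil {
		log.Fatal(err)
	}
	// Served from the senderFromServer cache when the node reported a
	// "from" field; falls back to an RPC lookup otherwise.
	sender, err := client.TransactionSender(ctx, tx, blockHash, 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("sender:", sender.Hex())
}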

View File

@ -1082,6 +1082,15 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockHashAndIndex(ctx cont
// GetTransactionCount returns the number of transactions the given address has sent for the given block number
func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*hexutil.Uint64, error) {
// Ask transaction pool for the nonce which includes pending transactions
if blockNr == rpc.PendingBlockNumber {
nonce, err := s.b.GetPoolNonce(ctx, address)
if err != nil {
return nil, err
}
return (*hexutil.Uint64)(&nonce), nil
}
// Resolve block number and use its state to ask for the nonce
state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
if state == nil || err != nil {
return nil, err

View File

@ -43,13 +43,6 @@ var locationEnabled uint32
// padded to, to aid in alignment.
var locationLength uint32
// fieldPadding is a global map with maximum field value lengths seen until now
// to allow padding log contexts in a bit smarter way.
var fieldPadding = make(map[string]int)
// fieldPaddingLock is a global mutex protecting the field padding map.
var fieldPaddingLock sync.RWMutex
type Format interface {
Format(r *Record) []byte
}
@ -168,20 +161,6 @@ func logfmt(buf *bytes.Buffer, ctx []interface{}, color int, term bool) {
if !ok {
k, v = errorKey, formatLogfmtValue(k, term)
}
// XXX: we should probably check that all of your key bytes aren't invalid
fieldPaddingLock.RLock()
padding := fieldPadding[k]
fieldPaddingLock.RUnlock()
length := utf8.RuneCountInString(v)
if padding < length {
padding = length
fieldPaddingLock.Lock()
fieldPadding[k] = padding
fieldPaddingLock.Unlock()
}
if color > 0 {
fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=", color, k)
} else {
@ -189,9 +168,6 @@ func logfmt(buf *bytes.Buffer, ctx []interface{}, color int, term bool) {
buf.WriteByte('=')
}
buf.WriteString(v)
if i < len(ctx)-2 {
buf.Write(bytes.Repeat([]byte{' '}, padding-length))
}
}
buf.WriteByte('\n')
}

View File

@ -312,8 +312,9 @@ func (r *PrefixedRegistry) UnregisterAll() {
}
var (
DefaultRegistry = NewRegistry()
EphemeralRegistry = NewRegistry()
DefaultRegistry = NewRegistry()
EphemeralRegistry = NewRegistry()
AccountingRegistry = NewRegistry() // registry used in swarm
)
// Call the given function for each registered metric.

View File

@ -75,6 +75,24 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
return true, nil
}
// DeletePeer disconnects and deletes forcefully a remote node.
func (api *PrivateAdminAPI) DeletePeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
return false, ErrNodeStopped
}
// Parse the enode URL and forcefully delete the peer
node, err := enode.ParseV4(url)
if err != nil {
return false, fmt.Errorf("invalid enode: %v", err)
}
if err := server.DeletePeer(node); err != nil {
return false, err
}
return true, nil
}
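A hedged sketch of invoking the new method over RPC, assuming the node exposes the admin API (the IPC path and enode URL are placeholders):

import (
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func deletePeerExample() {
	client, err := rpc.Dial("/tmp/geth.ipc") // placeholder IPC endpoint
	if err != nil {
		log.Fatal(err)
	}
	var removed bool
	url := "enode://<node-pubkey>@10.0.0.1:30303" // placeholder
	if err := client.Call(&removed, "admin_deletePeer", url); err != nil {
		log.Fatal(err)
	}
	log.Println("deleted:", removed)
}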
// AddTrustedPeer allows a remote node to always connect, even if slots are full
func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise

View File

@ -33,7 +33,8 @@ import (
// The fields of Node may not be modified.
type node struct {
enode.Node
addedAt time.Time // time when the node was added to the table
addedAt time.Time // time when the node was added to the table
livenessChecks uint // how often liveness was checked
}
type encPubkey [64]byte

View File

@ -75,8 +75,10 @@ type Table struct {
net transport
refreshReq chan chan struct{}
initDone chan struct{}
closeReq chan struct{}
closed chan struct{}
closeOnce sync.Once
closeReq chan struct{}
closed chan struct{}
nodeAddedHook func(*node) // for testing
}
@ -180,16 +182,14 @@ func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
if tab.net != nil {
tab.net.close()
}
select {
case <-tab.closed:
// already closed.
case tab.closeReq <- struct{}{}:
<-tab.closed // wait for refreshLoop to end.
}
tab.closeOnce.Do(func() {
if tab.net != nil {
tab.net.close()
}
// Wait for loop to end.
close(tab.closeReq)
<-tab.closed
})
}
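The rewrite above is the standard Go idiom for an idempotent shutdown; a minimal standalone sketch of the same shape:

import "sync"

type service struct {
	closeOnce sync.Once
	closeReq  chan struct{} // signals the run loop to stop
	closed    chan struct{} // closed by the run loop when it exits
}

// Close may be called multiple times and from multiple goroutines.
// sync.Once guarantees closeReq is closed exactly once, and because
// Do blocks concurrent callers until the function returns, every
// caller waits for the loop to fully wind down.
func (s *service) Close() {
	s.closeOnce.Do(func() {
		close(s.closeReq)
		<-s.closed
	})
}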
// setFallbackNodes sets the initial points of contact. These nodes
@ -290,12 +290,16 @@ func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
// we have asked all closest nodes, stop the search
break
}
// wait for the next reply
for _, n := range <-reply {
if n != nil && !seen[n.ID()] {
seen[n.ID()] = true
result.push(n, bucketSize)
select {
case nodes := <-reply:
for _, n := range nodes {
if n != nil && !seen[n.ID()] {
seen[n.ID()] = true
result.push(n, bucketSize)
}
}
case <-tab.closeReq:
return nil // shutdown, no need to continue.
}
pendingQueries--
}
@ -303,24 +307,28 @@ func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
}
func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
fails := tab.db.FindFails(n.ID())
fails := tab.db.FindFails(n.ID(), n.IP())
r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
if err != nil || len(r) == 0 {
if err == errClosed {
// Avoid recording failures on shutdown.
reply <- nil
return
} else if err != nil || len(r) == 0 {
fails++
tab.db.UpdateFindFails(n.ID(), fails)
tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
if fails >= maxFindnodeFailures {
log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
tab.delete(n)
}
} else if fails > 0 {
tab.db.UpdateFindFails(n.ID(), fails-1)
tab.db.UpdateFindFails(n.ID(), n.IP(), fails-1)
}
// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
// just remove those again during revalidation.
for _, n := range r {
tab.add(n)
tab.addSeenNode(n)
}
reply <- r
}
@ -329,7 +337,7 @@ func (tab *Table) refresh() <-chan struct{} {
done := make(chan struct{})
select {
case tab.refreshReq <- done:
case <-tab.closed:
case <-tab.closeReq:
close(done)
}
return done
@ -433,9 +441,9 @@ func (tab *Table) loadSeedNodes() {
seeds = append(seeds, tab.nursery...)
for i := range seeds {
seed := seeds[i]
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) }}
log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.add(seed)
tab.addSeenNode(seed)
}
}
@ -458,16 +466,17 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
b := tab.buckets[bi]
if err == nil {
// The node responded, move it to the front.
log.Debug("Revalidated node", "b", bi, "id", last.ID())
b.bump(last)
last.livenessChecks++
log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
tab.bumpInBucket(b, last)
return
}
// No reply received, pick a replacement or delete the node if there aren't
// any replacements.
if r := tab.replace(b, last); r != nil {
log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "r", r.ID(), "rip", r.IP())
log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
} else {
log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP())
log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
}
}
@ -502,7 +511,7 @@ func (tab *Table) copyLiveNodes() {
now := time.Now()
for _, b := range &tab.buckets {
for _, n := range b.entries {
if now.Sub(n.addedAt) >= seedMinTableTime {
if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime {
tab.db.UpdateNode(unwrapNode(n))
}
}
@ -518,7 +527,9 @@ func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
close := &nodesByDistance{target: target}
for _, b := range &tab.buckets {
for _, n := range b.entries {
close.push(n, nresults)
if n.livenessChecks > 0 {
close.push(n, nresults)
}
}
}
return close
@ -540,12 +551,12 @@ func (tab *Table) bucket(id enode.ID) *bucket {
return tab.buckets[d-bucketMinDistance-1]
}
// add attempts to add the given node to its corresponding bucket. If the bucket has space
// available, adding the node succeeds immediately. Otherwise, the node is added if the
// least recently active node in the bucket does not respond to a ping packet.
// addSeenNode adds a node which may or may not be live to the end of a bucket. If the
// bucket has space available, adding the node succeeds immediately. Otherwise, the node is
// added to the replacements list.
//
// The caller must not hold tab.mutex.
func (tab *Table) add(n *node) {
func (tab *Table) addSeenNode(n *node) {
if n.ID() == tab.self().ID() {
return
}
@ -553,39 +564,67 @@ func (tab *Table) add(n *node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
b := tab.bucket(n.ID())
if !tab.bumpOrAdd(b, n) {
// Node is not in table. Add it to the replacement list.
if contains(b.entries, n.ID()) {
// Already in bucket, don't add.
return
}
if len(b.entries) >= bucketSize {
// Bucket full, maybe add as replacement.
tab.addReplacement(b, n)
return
}
if !tab.addIP(b, n.IP()) {
// Can't add: IP limit reached.
return
}
// Add to end of bucket:
b.entries = append(b.entries, n)
b.replacements = deleteNode(b.replacements, n)
n.addedAt = time.Now()
if tab.nodeAddedHook != nil {
tab.nodeAddedHook(n)
}
}
// addThroughPing adds the given node to the table. Compared to plain
// 'add' there is an additional safety measure: if the table is still
// initializing the node is not added. This prevents an attack where the
// table could be filled by just sending ping repeatedly.
// addVerifiedNode adds a node whose existence has been verified recently to the front of a
// bucket. If the node is already in the bucket, it is moved to the front. If the bucket
// has no space, the node is added to the replacements list.
//
// There is an additional safety measure: if the table is still initializing the node
// is not added. This prevents an attack where the table could be filled by just sending
// ping repeatedly.
//
// The caller must not hold tab.mutex.
func (tab *Table) addThroughPing(n *node) {
func (tab *Table) addVerifiedNode(n *node) {
if !tab.isInitDone() {
return
}
tab.add(n)
}
if n.ID() == tab.self().ID() {
return
}
// stuff adds nodes to the end of their corresponding bucket in the table
// if the bucket is not full. The caller must not hold tab.mutex.
func (tab *Table) stuff(nodes []*node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
for _, n := range nodes {
if n.ID() == tab.self().ID() {
continue // don't add self
}
b := tab.bucket(n.ID())
if len(b.entries) < bucketSize {
tab.bumpOrAdd(b, n)
}
b := tab.bucket(n.ID())
if tab.bumpInBucket(b, n) {
// Already in bucket, moved to front.
return
}
if len(b.entries) >= bucketSize {
// Bucket full, maybe add as replacement.
tab.addReplacement(b, n)
return
}
if !tab.addIP(b, n.IP()) {
// Can't add: IP limit reached.
return
}
// Add to front of bucket.
b.entries, _ = pushNode(b.entries, n, bucketSize)
b.replacements = deleteNode(b.replacements, n)
n.addedAt = time.Now()
if tab.nodeAddedHook != nil {
tab.nodeAddedHook(n)
}
}
@ -657,12 +696,21 @@ func (tab *Table) replace(b *bucket, last *node) *node {
return r
}
// bump moves the given node to the front of the bucket entry list
// bumpInBucket moves the given node to the front of the bucket entry list
// if it is contained in that list.
func (b *bucket) bump(n *node) bool {
func (tab *Table) bumpInBucket(b *bucket, n *node) bool {
for i := range b.entries {
if b.entries[i].ID() == n.ID() {
// move it to the front
if !n.IP().Equal(b.entries[i].IP()) {
// Endpoint has changed, ensure that the new IP fits into table limits.
tab.removeIP(b, b.entries[i].IP())
if !tab.addIP(b, n.IP()) {
// It doesn't, put the previous one back.
tab.addIP(b, b.entries[i].IP())
return false
}
}
// Move it to the front.
copy(b.entries[1:], b.entries[:i])
b.entries[0] = n
return true
@ -671,29 +719,20 @@ func (b *bucket) bump(n *node) bool {
return false
}
// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
func (tab *Table) bumpOrAdd(b *bucket, n *node) bool {
if b.bump(n) {
return true
}
if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP()) {
return false
}
b.entries, _ = pushNode(b.entries, n, bucketSize)
b.replacements = deleteNode(b.replacements, n)
n.addedAt = time.Now()
if tab.nodeAddedHook != nil {
tab.nodeAddedHook(n)
}
return true
}
func (tab *Table) deleteInBucket(b *bucket, n *node) {
b.entries = deleteNode(b.entries, n)
tab.removeIP(b, n.IP())
}
func contains(ns []*node, id enode.ID) bool {
for _, n := range ns {
if n.ID() == id {
return true
}
}
return false
}
// pushNode adds n to the front of list, keeping at most max items.
func pushNode(list []*node, n *node, max int) ([]*node, *node) {
if len(list) < max {

View File

@ -67,6 +67,8 @@ const (
// RPC request structures
type (
ping struct {
senderKey *ecdsa.PublicKey // filled in by preverify
Version uint
From, To rpcEndpoint
Expiration uint64
@ -155,8 +157,13 @@ func nodeToRPC(n *node) rpcNode {
return rpcNode{ID: ekey, IP: n.IP(), UDP: uint16(n.UDP()), TCP: uint16(n.TCP())}
}
// packet is implemented by all protocol messages.
type packet interface {
handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error
// preverify checks whether the packet is valid and should be handled at all.
preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error
// handle handles the packet.
handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte)
// name returns the name of the packet for logging purposes.
name() string
}
@ -177,43 +184,48 @@ type udp struct {
tab *Table
wg sync.WaitGroup
addpending chan *pending
gotreply chan reply
closing chan struct{}
addReplyMatcher chan *replyMatcher
gotreply chan reply
closing chan struct{}
}
// pending represents a pending reply.
//
// some implementations of the protocol wish to send more than one
// reply packet to findnode. in general, any neighbors packet cannot
// Some implementations of the protocol wish to send more than one
// reply packet to findnode. In general, any neighbors packet cannot
// be matched up with a specific findnode packet.
//
// our implementation handles this by storing a callback function for
// each pending reply. incoming packets from a node are dispatched
// to all the callback functions for that node.
type pending struct {
// Our implementation handles this by storing a callback function for
// each pending reply. Incoming packets from a node are dispatched
// to all callback functions for that node.
type replyMatcher struct {
// these fields must match in the reply.
from enode.ID
ip net.IP
ptype byte
// time when the request must complete
deadline time.Time
// callback is called when a matching reply arrives. if it returns
// true, the callback is removed from the pending reply queue.
// if it returns false, the reply is considered incomplete and
// the callback will be invoked again for the next matching reply.
callback func(resp interface{}) (done bool)
// callback is called when a matching reply arrives. If it returns matched == true, the
// reply was acceptable. The second return value indicates whether the callback should
// be removed from the pending reply queue. If it returns false, the reply is considered
// incomplete and the callback will be invoked again for the next matching reply.
callback replyMatchFunc
// errc receives nil when the callback indicates completion or an
// error if no further reply is received within the timeout.
errc chan<- error
}
type replyMatchFunc func(interface{}) (matched bool, requestDone bool)
type reply struct {
from enode.ID
ip net.IP
ptype byte
data interface{}
data packet
// loop indicates whether there was
// a matching request by sending on this channel.
matched chan<- bool
@ -247,14 +259,14 @@ func ListenUDP(c conn, ln *enode.LocalNode, cfg Config) (*Table, error) {
func newUDP(c conn, ln *enode.LocalNode, cfg Config) (*Table, *udp, error) {
udp := &udp{
conn: c,
priv: cfg.PrivateKey,
netrestrict: cfg.NetRestrict,
localNode: ln,
db: ln.Database(),
closing: make(chan struct{}),
gotreply: make(chan reply),
addpending: make(chan *pending),
conn: c,
priv: cfg.PrivateKey,
netrestrict: cfg.NetRestrict,
localNode: ln,
db: ln.Database(),
closing: make(chan struct{}),
gotreply: make(chan reply),
addReplyMatcher: make(chan *replyMatcher),
}
tab, err := newTable(udp, ln.Database(), cfg.Bootnodes)
if err != nil {
@ -304,35 +316,37 @@ func (t *udp) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) <-ch
errc <- err
return errc
}
errc := t.pending(toid, pongPacket, func(p interface{}) bool {
ok := bytes.Equal(p.(*pong).ReplyTok, hash)
if ok && callback != nil {
// Add a matcher for the reply to the pending reply queue. Pongs are matched if they
// reference the ping we're about to send.
errc := t.pending(toid, toaddr.IP, pongPacket, func(p interface{}) (matched bool, requestDone bool) {
matched = bytes.Equal(p.(*pong).ReplyTok, hash)
if matched && callback != nil {
callback()
}
return ok
return matched, matched
})
// Send the packet.
t.localNode.UDPContact(toaddr)
t.write(toaddr, req.name(), packet)
t.write(toaddr, toid, req.name(), packet)
return errc
}
func (t *udp) waitping(from enode.ID) error {
return <-t.pending(from, pingPacket, func(interface{}) bool { return true })
}
// findnode sends a findnode request to the given node and waits until
// the node has sent up to k neighbors.
func (t *udp) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
// If we haven't seen a ping from the destination node for a while, it won't remember
// our endpoint proof and reject findnode. Solicit a ping first.
if time.Since(t.db.LastPingReceived(toid)) > bondExpiration {
if time.Since(t.db.LastPingReceived(toid, toaddr.IP)) > bondExpiration {
t.ping(toid, toaddr)
t.waitping(toid)
// Wait for them to ping back and process our pong.
time.Sleep(respTimeout)
}
// Add a matcher for 'neighbours' replies to the pending reply queue. The matcher is
// active until enough nodes have been received.
nodes := make([]*node, 0, bucketSize)
nreceived := 0
errc := t.pending(toid, neighborsPacket, func(r interface{}) bool {
errc := t.pending(toid, toaddr.IP, neighborsPacket, func(r interface{}) (matched bool, requestDone bool) {
reply := r.(*neighbors)
for _, rn := range reply.Nodes {
nreceived++
@ -343,22 +357,22 @@ func (t *udp) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]
}
nodes = append(nodes, n)
}
return nreceived >= bucketSize
return true, nreceived >= bucketSize
})
t.send(toaddr, findnodePacket, &findnode{
t.send(toaddr, toid, findnodePacket, &findnode{
Target: target,
Expiration: uint64(time.Now().Add(expiration).Unix()),
})
return nodes, <-errc
}
// pending adds a reply callback to the pending reply queue.
// see the documentation of type pending for a detailed explanation.
func (t *udp) pending(id enode.ID, ptype byte, callback func(interface{}) bool) <-chan error {
// pending adds a reply matcher to the pending reply queue.
// see the documentation of type replyMatcher for a detailed explanation.
func (t *udp) pending(id enode.ID, ip net.IP, ptype byte, callback replyMatchFunc) <-chan error {
ch := make(chan error, 1)
p := &pending{from: id, ptype: ptype, callback: callback, errc: ch}
p := &replyMatcher{from: id, ip: ip, ptype: ptype, callback: callback, errc: ch}
select {
case t.addpending <- p:
case t.addReplyMatcher <- p:
// loop will handle it
case <-t.closing:
ch <- errClosed
@ -366,10 +380,12 @@ func (t *udp) pending(id enode.ID, ptype byte, callback func(interface{}) bool)
return ch
}
func (t *udp) handleReply(from enode.ID, ptype byte, req packet) bool {
// handleReply dispatches a reply packet, invoking reply matchers. It returns
// whether any matcher considered the packet acceptable.
func (t *udp) handleReply(from enode.ID, fromIP net.IP, ptype byte, req packet) bool {
matched := make(chan bool, 1)
select {
case t.gotreply <- reply{from, ptype, req, matched}:
case t.gotreply <- reply{from, fromIP, ptype, req, matched}:
// loop will handle it
return <-matched
case <-t.closing:
@ -385,8 +401,8 @@ func (t *udp) loop() {
var (
plist = list.New()
timeout = time.NewTimer(0)
nextTimeout *pending // head of plist when timeout was last reset
contTimeouts = 0 // number of continuous timeouts to do NTP checks
nextTimeout *replyMatcher // head of plist when timeout was last reset
contTimeouts = 0 // number of continuous timeouts to do NTP checks
ntpWarnTime = time.Unix(0, 0)
)
<-timeout.C // ignore first timeout
@ -399,7 +415,7 @@ func (t *udp) loop() {
// Start the timer so it fires when the next pending reply has expired.
now := time.Now()
for el := plist.Front(); el != nil; el = el.Next() {
nextTimeout = el.Value.(*pending)
nextTimeout = el.Value.(*replyMatcher)
if dist := nextTimeout.deadline.Sub(now); dist < 2*respTimeout {
timeout.Reset(dist)
return
@ -420,25 +436,23 @@ func (t *udp) loop() {
select {
case <-t.closing:
for el := plist.Front(); el != nil; el = el.Next() {
el.Value.(*pending).errc <- errClosed
el.Value.(*replyMatcher).errc <- errClosed
}
return
case p := <-t.addpending:
case p := <-t.addReplyMatcher:
p.deadline = time.Now().Add(respTimeout)
plist.PushBack(p)
case r := <-t.gotreply:
var matched bool
var matched bool // whether any replyMatcher considered the reply acceptable.
for el := plist.Front(); el != nil; el = el.Next() {
p := el.Value.(*pending)
if p.from == r.from && p.ptype == r.ptype {
matched = true
// Remove the matcher if its callback indicates
// that all replies have been received. This is
// required for packet types that expect multiple
// reply packets.
if p.callback(r.data) {
p := el.Value.(*replyMatcher)
if p.from == r.from && p.ptype == r.ptype && p.ip.Equal(r.ip) {
ok, requestDone := p.callback(r.data)
matched = matched || ok
// Remove the matcher if callback indicates that all replies have been received.
if requestDone {
p.errc <- nil
plist.Remove(el)
}
@ -453,7 +467,7 @@ func (t *udp) loop() {
// Notify and remove callbacks whose deadline is in the past.
for el := plist.Front(); el != nil; el = el.Next() {
p := el.Value.(*pending)
p := el.Value.(*replyMatcher)
if now.After(p.deadline) || now.Equal(p.deadline) {
p.errc <- errTimeout
plist.Remove(el)
@ -504,17 +518,17 @@ func init() {
}
}
func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req packet) ([]byte, error) {
func (t *udp) send(toaddr *net.UDPAddr, toid enode.ID, ptype byte, req packet) ([]byte, error) {
packet, hash, err := encodePacket(t.priv, ptype, req)
if err != nil {
return hash, err
}
return hash, t.write(toaddr, req.name(), packet)
return hash, t.write(toaddr, toid, req.name(), packet)
}
func (t *udp) write(toaddr *net.UDPAddr, what string, packet []byte) error {
func (t *udp) write(toaddr *net.UDPAddr, toid enode.ID, what string, packet []byte) error {
_, err := t.conn.WriteToUDP(packet, toaddr)
log.Trace(">> "+what, "addr", toaddr, "err", err)
log.Trace(">> "+what, "id", toid, "addr", toaddr, "err", err)
return err
}
@ -573,13 +587,19 @@ func (t *udp) readLoop(unhandled chan<- ReadPacket) {
}
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
packet, fromID, hash, err := decodePacket(buf)
packet, fromKey, hash, err := decodePacket(buf)
if err != nil {
log.Debug("Bad discv4 packet", "addr", from, "err", err)
return err
}
err = packet.handle(t, from, fromID, hash)
log.Trace("<< "+packet.name(), "addr", from, "err", err)
fromID := fromKey.id()
if err == nil {
err = packet.preverify(t, from, fromID, fromKey)
}
log.Trace("<< "+packet.name(), "id", fromID, "addr", from, "err", err)
if err == nil {
packet.handle(t, from, fromID, hash)
}
return err
}
@ -615,54 +635,67 @@ func decodePacket(buf []byte) (packet, encPubkey, []byte, error) {
return req, fromKey, hash, err
}
func (req *ping) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
// Packet Handlers
func (req *ping) preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error {
if expired(req.Expiration) {
return errExpired
}
key, err := decodePubkey(fromKey)
if err != nil {
return fmt.Errorf("invalid public key: %v", err)
return errors.New("invalid public key")
}
t.send(from, pongPacket, &pong{
req.senderKey = key
return nil
}
func (req *ping) handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte) {
// Reply.
t.send(from, fromID, pongPacket, &pong{
To: makeEndpoint(from, req.From.TCP),
ReplyTok: mac,
Expiration: uint64(time.Now().Add(expiration).Unix()),
})
n := wrapNode(enode.NewV4(key, from.IP, int(req.From.TCP), from.Port))
t.handleReply(n.ID(), pingPacket, req)
if time.Since(t.db.LastPongReceived(n.ID())) > bondExpiration {
t.sendPing(n.ID(), from, func() { t.tab.addThroughPing(n) })
// Ping back if our last pong on file is too far in the past.
n := wrapNode(enode.NewV4(req.senderKey, from.IP, int(req.From.TCP), from.Port))
if time.Since(t.db.LastPongReceived(n.ID(), from.IP)) > bondExpiration {
t.sendPing(fromID, from, func() {
t.tab.addVerifiedNode(n)
})
} else {
t.tab.addThroughPing(n)
t.tab.addVerifiedNode(n)
}
// Update node database and endpoint predictor.
t.db.UpdateLastPingReceived(n.ID(), from.IP, time.Now())
t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)})
t.db.UpdateLastPingReceived(n.ID(), time.Now())
return nil
}
func (req *ping) name() string { return "PING/v4" }
func (req *pong) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
func (req *pong) preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error {
if expired(req.Expiration) {
return errExpired
}
fromID := fromKey.id()
if !t.handleReply(fromID, pongPacket, req) {
if !t.handleReply(fromID, from.IP, pongPacket, req) {
return errUnsolicitedReply
}
t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)})
t.db.UpdateLastPongReceived(fromID, time.Now())
return nil
}
func (req *pong) handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte) {
t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)})
t.db.UpdateLastPongReceived(fromID, from.IP, time.Now())
}
func (req *pong) name() string { return "PONG/v4" }
func (req *findnode) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
func (req *findnode) preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error {
if expired(req.Expiration) {
return errExpired
}
fromID := fromKey.id()
if time.Since(t.db.LastPongReceived(fromID)) > bondExpiration {
if time.Since(t.db.LastPongReceived(fromID, from.IP)) > bondExpiration {
// No endpoint proof pong exists, we don't process the packet. This prevents an
// attack vector where the discovery protocol could be used to amplify traffic in a
// DDOS attack. A malicious actor would send a findnode request with the IP address
@ -671,43 +704,50 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []
// findnode) to the victim.
return errUnknownNode
}
return nil
}
func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte) {
// Determine closest nodes.
target := enode.ID(crypto.Keccak256Hash(req.Target[:]))
t.tab.mutex.Lock()
closest := t.tab.closest(target, bucketSize).entries
t.tab.mutex.Unlock()
p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
var sent bool
// Send neighbors in chunks with at most maxNeighbors per packet
// to stay below the 1280 byte limit.
p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
var sent bool
for _, n := range closest {
if netutil.CheckRelayIP(from.IP, n.IP()) == nil {
p.Nodes = append(p.Nodes, nodeToRPC(n))
}
if len(p.Nodes) == maxNeighbors {
t.send(from, neighborsPacket, &p)
t.send(from, fromID, neighborsPacket, &p)
p.Nodes = p.Nodes[:0]
sent = true
}
}
if len(p.Nodes) > 0 || !sent {
t.send(from, neighborsPacket, &p)
t.send(from, fromID, neighborsPacket, &p)
}
return nil
}
func (req *findnode) name() string { return "FINDNODE/v4" }
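
Note on the chunking loop above: `maxNeighbors` is not a hand-picked constant. Upstream derives it once at init time by growing a worst-case neighbors packet until its RLP encoding would exceed the 1280-byte discovery datagram limit. A simplified, illustrative sketch of that derivation, using the names from the surrounding file:

    // Sketch: count how many maximal rpcNode entries fit below the
    // 1280-byte packet limit; headSize covers hash, signature and type byte.
    p := neighbors{Expiration: ^uint64(0)}
    maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
    for n := 0; ; n++ {
        p.Nodes = append(p.Nodes, maxSizeNode)
        size, _, err := rlp.EncodeToReader(p)
        if err != nil {
            panic("cannot encode: " + err.Error())
        }
        if headSize+size+1 >= 1280 {
            maxNeighbors = n
            break
        }
    }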
func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
func (req *neighbors) preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error {
if expired(req.Expiration) {
return errExpired
}
if !t.handleReply(fromKey.id(), neighborsPacket, req) {
if !t.handleReply(fromID, from.IP, neighborsPacket, req) {
return errUnsolicitedReply
}
return nil
}
func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte) {
}
func (req *neighbors) name() string { return "NEIGHBORS/v4" }
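
Taken together, the handlers above now satisfy a split interface: `preverify` runs first on the receive path and may reject a packet with an error, while `handle` performs side effects only and runs when `preverify` returned nil (exactly the dispatch order shown at the top of this file). A minimal sketch of that interface, reconstructed from the signatures above:

    type packet interface {
        // preverify checks whether the packet is valid and should be handled at all.
        preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error
        // handle processes the packet; it runs only if preverify returned nil.
        handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte)
        // name returns the protocol name used in logs, e.g. "PING/v4".
        name() string
    }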
func expired(ts uint64) bool {


@ -27,10 +27,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
var (
@ -800,7 +800,7 @@ func (n *nodeNetGuts) startNextQuery(net *Network) {
func (q *findnodeQuery) start(net *Network) bool {
// Satisfy queries against the local node directly.
if q.remote == net.tab.self {
closest := net.tab.closest(crypto.Keccak256Hash(q.target[:]), bucketSize)
closest := net.tab.closest(q.target, bucketSize)
q.reply <- closest.entries
return true
}
@ -1234,7 +1234,7 @@ func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
}
func rlpHash(x interface{}) (h common.Hash) {
hw := sha3.NewKeccak256()
hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
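
The import swap above (`crypto/sha3` → `golang.org/x/crypto/sha3`) is mechanical: the Keccak implementation formerly vendored inside go-ethereum was upstreamed, and the pre-NIST padding Ethereum relies on is now exposed as `NewLegacyKeccak256`. A self-contained example of the replacement call:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        // NewLegacyKeccak256 returns the original Keccak (non-NIST padding),
        // which is what Ethereum hashes with; sha3.New256 would NOT match.
        h := sha3.NewLegacyKeccak256()
        h.Write([]byte("hello"))
        fmt.Printf("%x\n", h.Sum(nil))
    }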


@ -23,9 +23,9 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// List of known secure identity schemes.
@ -48,7 +48,7 @@ func SignV4(r *enr.Record, privkey *ecdsa.PrivateKey) error {
cpy.Set(enr.ID("v4"))
cpy.Set(Secp256k1(privkey.PublicKey))
h := sha3.NewKeccak256()
h := sha3.NewLegacyKeccak256()
rlp.Encode(h, cpy.AppendElements(nil))
sig, err := crypto.Sign(h.Sum(nil), privkey)
if err != nil {
@ -69,7 +69,7 @@ func (V4ID) Verify(r *enr.Record, sig []byte) error {
return fmt.Errorf("invalid public key")
}
h := sha3.NewKeccak256()
h := sha3.NewLegacyKeccak256()
rlp.Encode(h, r.AppendElements(nil))
if !crypto.VerifySignature(entry, h.Sum(nil), sig) {
return enr.ErrInvalidSig


@ -21,11 +21,11 @@ import (
"crypto/rand"
"encoding/binary"
"fmt"
"net"
"os"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
@ -37,24 +37,31 @@ import (
// Keys in the node database.
const (
dbVersionKey = "version" // Version of the database to flush if changes
dbItemPrefix = "n:" // Identifier to prefix node entries with
dbVersionKey = "version" // Version of the database to flush if changes
dbNodePrefix = "n:" // Identifier to prefix node entries with
dbLocalPrefix = "local:"
dbDiscoverRoot = "v4"
dbDiscoverRoot = ":discover"
dbDiscoverSeq = dbDiscoverRoot + ":seq"
dbDiscoverPing = dbDiscoverRoot + ":lastping"
dbDiscoverPong = dbDiscoverRoot + ":lastpong"
dbDiscoverFindFails = dbDiscoverRoot + ":findfail"
dbLocalRoot = ":local"
dbLocalSeq = dbLocalRoot + ":seq"
// These fields are stored per ID and IP, the full key is "n:<ID>:v4:<IP>:findfail".
// Use nodeItemKey to create those keys.
dbNodeFindFails = "findfail"
dbNodePing = "lastping"
dbNodePong = "lastpong"
dbNodeSeq = "seq"
// Local information is keyed by ID only, the full key is "local:<ID>:seq".
// Use localItemKey to create those keys.
dbLocalSeq = "seq"
)
var (
const (
dbNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
dbCleanupCycle = time.Hour // Time period for running the expiration task.
dbVersion = 7
dbVersion = 8
)
var zeroIP = make(net.IP, 16)
// DB is the node database, storing previously seen nodes and any collected metadata about
// them for QoS purposes.
type DB struct {
@ -119,27 +126,58 @@ func newPersistentDB(path string) (*DB, error) {
return &DB{lvl: db, quit: make(chan struct{})}, nil
}
// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id ID, field string) []byte {
if (id == ID{}) {
return []byte(field)
}
return append([]byte(dbItemPrefix), append(id[:], field...)...)
// nodeKey returns the database key for a node record.
func nodeKey(id ID) []byte {
key := append([]byte(dbNodePrefix), id[:]...)
key = append(key, ':')
key = append(key, dbDiscoverRoot...)
return key
}
// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id ID, field string) {
// If the key is not of a node, return it plainly
if !bytes.HasPrefix(key, []byte(dbItemPrefix)) {
return ID{}, string(key)
// splitNodeKey returns the node ID of a key created by nodeKey.
func splitNodeKey(key []byte) (id ID, rest []byte) {
if !bytes.HasPrefix(key, []byte(dbNodePrefix)) {
return ID{}, nil
}
// Otherwise split the id and field
item := key[len(dbItemPrefix):]
item := key[len(dbNodePrefix):]
copy(id[:], item[:len(id)])
field = string(item[len(id):])
return id, item[len(id)+1:]
}
return id, field
// nodeItemKey returns the database key for a node metadata field.
func nodeItemKey(id ID, ip net.IP, field string) []byte {
ip16 := ip.To16()
if ip16 == nil {
panic(fmt.Errorf("invalid IP (length %d)", len(ip)))
}
return bytes.Join([][]byte{nodeKey(id), ip16, []byte(field)}, []byte{':'})
}
// splitNodeItemKey returns the components of a key created by nodeItemKey.
func splitNodeItemKey(key []byte) (id ID, ip net.IP, field string) {
id, key = splitNodeKey(key)
// Skip discover root.
if string(key) == dbDiscoverRoot {
return id, nil, ""
}
key = key[len(dbDiscoverRoot)+1:]
// Split out the IP.
ip = net.IP(key[:16])
if ip4 := ip.To4(); ip4 != nil {
ip = ip4
}
key = key[16+1:]
// Field is the remainder of key.
field = string(key)
return id, ip, field
}
// localItemKey returns the key of a local node item.
func localItemKey(id ID, field string) []byte {
key := append([]byte(dbLocalPrefix), id[:]...)
key = append(key, ':')
key = append(key, field...)
return key
}
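
Put together, these helpers produce exactly the layouts described in the const block above. An illustrative rendering, with the binary ID and IP segments shown symbolically (id is 32 bytes, ip16 is 16 bytes):

    nodeKey(id)                     -> "n:" + id + ":v4"
    nodeItemKey(id, ip, dbNodePong) -> "n:" + id + ":v4:" + ip16 + ":lastpong"
    localItemKey(id, dbLocalSeq)    -> "local:" + id + ":seq"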
// fetchInt64 retrieves an integer associated with a particular key.
@ -181,7 +219,7 @@ func (db *DB) storeUint64(key []byte, n uint64) error {
// Node retrieves a node with a given id from the database.
func (db *DB) Node(id ID) *Node {
blob, err := db.lvl.Get(makeKey(id, dbDiscoverRoot), nil)
blob, err := db.lvl.Get(nodeKey(id), nil)
if err != nil {
return nil
}
@ -207,15 +245,15 @@ func (db *DB) UpdateNode(node *Node) error {
if err != nil {
return err
}
if err := db.lvl.Put(makeKey(node.ID(), dbDiscoverRoot), blob, nil); err != nil {
if err := db.lvl.Put(nodeKey(node.ID()), blob, nil); err != nil {
return err
}
return db.storeUint64(makeKey(node.ID(), dbDiscoverSeq), node.Seq())
return db.storeUint64(nodeItemKey(node.ID(), zeroIP, dbNodeSeq), node.Seq())
}
// NodeSeq returns the stored record sequence number of the given node.
func (db *DB) NodeSeq(id ID) uint64 {
return db.fetchUint64(makeKey(id, dbDiscoverSeq))
return db.fetchUint64(nodeItemKey(id, zeroIP, dbNodeSeq))
}
// Resolve returns the stored record of the node if it has a larger sequence
@ -227,15 +265,17 @@ func (db *DB) Resolve(n *Node) *Node {
return db.Node(n.ID())
}
// DeleteNode deletes all information/keys associated with a node.
func (db *DB) DeleteNode(id ID) error {
deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
for deleter.Next() {
if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
return err
}
// DeleteNode deletes all information associated with a node.
func (db *DB) DeleteNode(id ID) {
deleteRange(db.lvl, nodeKey(id))
}
func deleteRange(db *leveldb.DB, prefix []byte) {
it := db.NewIterator(util.BytesPrefix(prefix), nil)
defer it.Release()
for it.Next() {
db.Delete(it.Key(), nil)
}
return nil
}
// ensureExpirer is a small helper method ensuring that the data expiration
@ -259,9 +299,7 @@ func (db *DB) expirer() {
for {
select {
case <-tick.C:
if err := db.expireNodes(); err != nil {
log.Error("Failed to expire nodedb items", "err", err)
}
db.expireNodes()
case <-db.quit:
return
}
@ -269,71 +307,85 @@ func (db *DB) expirer() {
}
// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *DB) expireNodes() error {
threshold := time.Now().Add(-dbNodeExpiration)
// Find discovered nodes that are older than the allowance
it := db.lvl.NewIterator(nil, nil)
// been seen (i.e. received a pong from) for some time.
func (db *DB) expireNodes() {
it := db.lvl.NewIterator(util.BytesPrefix([]byte(dbNodePrefix)), nil)
defer it.Release()
for it.Next() {
// Skip the item if not a discovery node
id, field := splitKey(it.Key())
if field != dbDiscoverRoot {
continue
}
// Skip the node if not expired yet (and not self)
if seen := db.LastPongReceived(id); seen.After(threshold) {
continue
}
// Otherwise delete all associated information
db.DeleteNode(id)
if !it.Next() {
return
}
var (
threshold = time.Now().Add(-dbNodeExpiration).Unix()
youngestPong int64
atEnd = false
)
for !atEnd {
id, ip, field := splitNodeItemKey(it.Key())
if field == dbNodePong {
time, _ := binary.Varint(it.Value())
if time > youngestPong {
youngestPong = time
}
if time < threshold {
// Last pong from this IP older than threshold, remove fields belonging to it.
deleteRange(db.lvl, nodeItemKey(id, ip, ""))
}
}
atEnd = !it.Next()
nextID, _ := splitNodeKey(it.Key())
if atEnd || nextID != id {
// We've moved beyond the last entry of the current ID.
// Remove everything if there was no recent enough pong.
if youngestPong > 0 && youngestPong < threshold {
deleteRange(db.lvl, nodeKey(id))
}
youngestPong = 0
}
}
return nil
}
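
The rewritten expiry relies on one property of the key scheme: every field of a node sorts together under its "n:<id>" prefix, so a single ordered scan visits all per-IP entries of one ID consecutively, which is what lets the loop track the youngest pong per ID. A small self-contained illustration of that prefix grouping with goleveldb (in-memory store, made-up keys):

    package main

    import (
        "fmt"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/storage"
        "github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        // Throwaway in-memory database, for illustration only.
        db, err := leveldb.Open(storage.NewMemStorage(), nil)
        if err != nil {
            panic(err)
        }
        defer db.Close()

        db.Put([]byte("n:aaaa:v4:ip1:lastpong"), []byte{1}, nil)
        db.Put([]byte("n:aaaa:v4:ip2:lastpong"), []byte{2}, nil)
        db.Put([]byte("n:bbbb:v4:ip1:lastpong"), []byte{3}, nil)

        // All fields of node "aaaa" are visited consecutively by one range
        // scan; expireNodes exploits this to group entries by ID.
        it := db.NewIterator(util.BytesPrefix([]byte("n:aaaa")), nil)
        defer it.Release()
        for it.Next() {
            fmt.Printf("%s\n", it.Key())
        }
    }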
// LastPingReceived retrieves the time of the last ping packet received from
// a remote node.
func (db *DB) LastPingReceived(id ID) time.Time {
return time.Unix(db.fetchInt64(makeKey(id, dbDiscoverPing)), 0)
func (db *DB) LastPingReceived(id ID, ip net.IP) time.Time {
return time.Unix(db.fetchInt64(nodeItemKey(id, ip, dbNodePing)), 0)
}
// UpdateLastPingReceived updates the last time we tried contacting a remote node.
func (db *DB) UpdateLastPingReceived(id ID, instance time.Time) error {
return db.storeInt64(makeKey(id, dbDiscoverPing), instance.Unix())
func (db *DB) UpdateLastPingReceived(id ID, ip net.IP, instance time.Time) error {
return db.storeInt64(nodeItemKey(id, ip, dbNodePing), instance.Unix())
}
// LastPongReceived retrieves the time of the last successful pong from remote node.
func (db *DB) LastPongReceived(id ID) time.Time {
func (db *DB) LastPongReceived(id ID, ip net.IP) time.Time {
// Launch expirer
db.ensureExpirer()
return time.Unix(db.fetchInt64(makeKey(id, dbDiscoverPong)), 0)
return time.Unix(db.fetchInt64(nodeItemKey(id, ip, dbNodePong)), 0)
}
// UpdateLastPongReceived updates the last pong time of a node.
func (db *DB) UpdateLastPongReceived(id ID, instance time.Time) error {
return db.storeInt64(makeKey(id, dbDiscoverPong), instance.Unix())
func (db *DB) UpdateLastPongReceived(id ID, ip net.IP, instance time.Time) error {
return db.storeInt64(nodeItemKey(id, ip, dbNodePong), instance.Unix())
}
// FindFails retrieves the number of findnode failures since bonding.
func (db *DB) FindFails(id ID) int {
return int(db.fetchInt64(makeKey(id, dbDiscoverFindFails)))
func (db *DB) FindFails(id ID, ip net.IP) int {
return int(db.fetchInt64(nodeItemKey(id, ip, dbNodeFindFails)))
}
// UpdateFindFails updates the number of findnode failures since bonding.
func (db *DB) UpdateFindFails(id ID, fails int) error {
return db.storeInt64(makeKey(id, dbDiscoverFindFails), int64(fails))
func (db *DB) UpdateFindFails(id ID, ip net.IP, fails int) error {
return db.storeInt64(nodeItemKey(id, ip, dbNodeFindFails), int64(fails))
}
// LocalSeq retrieves the local record sequence counter.
func (db *DB) localSeq(id ID) uint64 {
return db.fetchUint64(makeKey(id, dbLocalSeq))
return db.fetchUint64(nodeItemKey(id, zeroIP, dbLocalSeq))
}
// storeLocalSeq stores the local record sequence counter.
func (db *DB) storeLocalSeq(id ID, n uint64) {
db.storeUint64(makeKey(id, dbLocalSeq), n)
db.storeUint64(nodeItemKey(id, zeroIP, dbLocalSeq), n)
}
// QuerySeeds retrieves random nodes to be used as potential seed nodes
@ -355,14 +407,14 @@ seek:
ctr := id[0]
rand.Read(id[:])
id[0] = ctr + id[0]%16
it.Seek(makeKey(id, dbDiscoverRoot))
it.Seek(nodeKey(id))
n := nextNode(it)
if n == nil {
id[0] = 0
continue seek // iterator exhausted
}
if now.Sub(db.LastPongReceived(n.ID())) > maxAge {
if now.Sub(db.LastPongReceived(n.ID(), n.IP())) > maxAge {
continue seek
}
for i := range nodes {
@ -379,8 +431,8 @@ seek:
// database entries.
func nextNode(it iterator.Iterator) *Node {
for end := false; !end; end = !it.Next() {
id, field := splitKey(it.Key())
if field != dbDiscoverRoot {
id, rest := splitNodeKey(it.Key())
if string(rest) != dbDiscoverRoot {
continue
}
return mustDecodeNode(id[:], it.Value())


@ -23,7 +23,6 @@ import (
"net"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
@ -45,10 +44,7 @@ const (
snappyProtocolVersion = 5
pingInterval = 1 * time.Second
// watchdogInterval intentionally lower than ping interval.
// this way we reduce potential flaky window size.
watchdogInterval = 200 * time.Millisecond
pingInterval = 15 * time.Second
)
const (
@ -110,7 +106,6 @@ type Peer struct {
log log.Logger
created mclock.AbsTime
flaky int32
wg sync.WaitGroup
protoErr chan error
closed chan struct{}
@ -130,11 +125,6 @@ func NewPeer(id enode.ID, name string, caps []Cap) *Peer {
return peer
}
// IsFlaky returns true if there was no incoming traffic recently.
func (p *Peer) IsFlaky() bool {
return atomic.LoadInt32(&p.flaky) == 1
}
// ID returns the node's public key.
func (p *Peer) ID() enode.ID {
return p.rw.node.ID()
@ -211,10 +201,8 @@ func (p *Peer) run() (remoteRequested bool, err error) {
readErr = make(chan error, 1)
reason DiscReason // sent to the peer
)
p.wg.Add(3)
reads := make(chan struct{}, 10) // channel for reads
go p.readLoop(readErr, reads)
go p.watchdogLoop(reads)
p.wg.Add(2)
go p.readLoop(readErr)
go p.pingLoop()
// Start all protocol handlers.
@ -252,10 +240,37 @@ loop:
close(p.closed)
p.rw.close(reason)
p.wg.Wait()
if shutdownErr := p.shutdownWithTimeout(5 * time.Second); shutdownErr != nil {
p.Log().Error("Timeout while waiting for the peer to shut down", "err", shutdownErr)
}
return remoteRequested, err
}
func (p *Peer) shutdownWithTimeout(timeout time.Duration) error {
/* A watchdog for the WaitGroup (p.wg).
 * When a peer shuts down we give all protocols `timeout` (5 seconds at the
 * call site) to finish, but we continue the shutdown anyway and log an error.
 * Otherwise this peer would be stuck in the list of peers (`admin_peers`)
 * and could not reconnect until the app is restarted.
 */
c := make(chan struct{})
go func() {
p.wg.Wait()
close(c)
}()
select {
case <-c:
p.Log().Debug("Peer stopped successfully")
return nil
case <-time.After(timeout):
return errors.New("WATCHDOG_ENGAGED. A few goroutines leaked.")
}
}
func (p *Peer) pingLoop() {
ping := time.NewTimer(pingInterval)
defer p.wg.Done()
@ -274,24 +289,7 @@ func (p *Peer) pingLoop() {
}
}
func (p *Peer) watchdogLoop(reads <-chan struct{}) {
defer p.wg.Done()
hb := time.NewTimer(watchdogInterval)
defer hb.Stop()
for {
select {
case <-reads:
atomic.StoreInt32(&p.flaky, 0)
case <-hb.C:
atomic.StoreInt32(&p.flaky, 1)
case <-p.closed:
return
}
hb.Reset(watchdogInterval)
}
}
func (p *Peer) readLoop(errc chan<- error, reads chan<- struct{}) {
func (p *Peer) readLoop(errc chan<- error) {
defer p.wg.Done()
for {
msg, err := p.rw.ReadMsg()
@ -304,7 +302,6 @@ func (p *Peer) readLoop(errc chan<- error, reads chan<- struct{}) {
errc <- err
return
}
reads <- struct{}{}
}
}


@ -39,9 +39,9 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
"golang.org/x/crypto/sha3"
)
const (
@ -253,10 +253,10 @@ func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
}
// setup sha3 instances for the MACs
mac1 := sha3.NewKeccak256()
mac1 := sha3.NewLegacyKeccak256()
mac1.Write(xor(s.MAC, h.respNonce))
mac1.Write(auth)
mac2 := sha3.NewKeccak256()
mac2 := sha3.NewLegacyKeccak256()
mac2.Write(xor(s.MAC, h.initNonce))
mac2.Write(authResp)
if h.initiator {


@ -52,7 +52,7 @@ const (
// Maximum time allowed for reading a complete message.
// This is effectively the amount of time a connection can be idle.
frameReadTimeout = 10 * time.Second
frameReadTimeout = 30 * time.Second
// Maximum amount of time allowed for writing a complete message.
frameWriteTimeout = 20 * time.Second
@ -327,6 +327,36 @@ func (srv *Server) RemovePeer(node *enode.Node) {
}
}
// DeletePeer deletes the given node forcefully.
func (srv *Server) DeletePeer(node *enode.Node) error {
peerIDStr := node.ID().String()
srv.log.Info("DeletePeer called", "peerID", peerIDStr)
var peer *Peer
for _, p := range srv.Peers() {
if p.ID() == node.ID() {
peer = p
break
}
}
if peer == nil {
err := errors.New("peer not found")
srv.log.Info("DeletePeer failed to match a peer", "peerID", peerIDStr, "err", err)
return err
}
select {
case srv.delpeer <- peerDrop{peer, errors.New("forced delete"), true}:
case <-srv.quit:
}
srv.log.Info("DeletePeer passed the request to delpeer channel", "peerID", peerIDStr)
return nil
}
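
A hedged usage sketch for the new method, assuming `srv` is a running *p2p.Server; the enode URL below is a placeholder, not a real node:

    // Hypothetical call site.
    node, err := enode.ParseV4("enode://<64-byte-hex-pubkey>@10.0.0.1:30303")
    if err != nil {
        log.Crit("bad enode URL", "err", err)
    }
    if err := srv.DeletePeer(node); err != nil {
        // Returns "peer not found" when the node is not currently connected.
        log.Warn("could not delete peer", "err", err)
    }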
// AddTrustedPeer adds the given node to a reserved whitelist which allows the
// node to always connect, even if the slot are full.
func (srv *Server) AddTrustedPeer(node *enode.Node) {


@ -47,6 +47,20 @@ var RinkebyBootnodes = []string{
"enode://b6b28890b006743680c52e64e0d16db57f28124885595fa03a562be1d2bf0f3a1da297d56b13da25fb992888fd556d4c1a27b1f39d531bde7de1921c90061cc6@159.89.28.211:30303", // AKASHA
}
// GoerliBootnodes are the enode URLs of the P2P bootstrap nodes running on the
// Görli test network.
var GoerliBootnodes = []string{
// Upstream bootnodes
"enode://011f758e6552d105183b1761c5e2dea0111bc20fd5f6422bc7f91e0fabbec9a6595caf6239b37feb773dddd3f87240d99d859431891e4a642cf2a0a9e6cbb98a@51.141.78.53:30303",
"enode://176b9417f511d05b6b2cf3e34b756cf0a7096b3094572a8f6ef4cdcb9d1f9d00683bf0f83347eebdf3b81c3521c2332086d9592802230bf528eaf606a1d9677b@13.93.54.137:30303",
"enode://46add44b9f13965f7b9875ac6b85f016f341012d84f975377573800a863526f4da19ae2c620ec73d11591fa9510e992ecc03ad0751f53cc02f7c7ed6d55c7291@94.237.54.114:30313",
"enode://c1f8b7c2ac4453271fa07d8e9ecf9a2e8285aa0bd0c07df0131f47153306b0736fd3db8924e7a9bf0bed6b1d8d4f87362a71b033dc7c64547728d953e43e59b2@52.64.155.147:30303",
"enode://f4a9c6ee28586009fb5a96c8af13a58ed6d8315a9eee4772212c1d4d9cebe5a8b8a78ea4434f318726317d04a3f531a1ef0420cf9752605a562cfe858c46e263@213.186.16.82:30303",
// Ethereum Foundation bootnode
"enode://573b6607cd59f241e30e4c4943fd50e99e2b6f42f9bd5ca111659d309c06741247f4f1e93843ad3e8c8c18b6e2d94c161b7ef67479b3938780a97134b618b5ce@52.56.136.200:30303",
}
// DiscoveryV5Bootnodes are the enode URLs of the P2P bootstrap nodes for the
// experimental RLPx v5 topic-discovery network.
var DiscoveryV5Bootnodes = []string{


@ -42,17 +42,18 @@ var (
EIP155Block: big.NewInt(2675000),
EIP158Block: big.NewInt(2675000),
ByzantiumBlock: big.NewInt(4370000),
ConstantinopleBlock: big.NewInt(7080000),
ConstantinopleBlock: big.NewInt(7280000),
PetersburgBlock: big.NewInt(7280000),
Ethash: new(EthashConfig),
}
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "mainnet",
SectionIndex: 208,
SectionHead: common.HexToHash("0x5e9f7696c397d9df8f3b1abda857753575c6f5cff894e1a3d9e1a2af1bd9d6ac"),
CHTRoot: common.HexToHash("0x954a63134f6897f015f026387c59c98c4dae7b336610ff5a143455aac9153e9d"),
BloomRoot: common.HexToHash("0x8006c5e44b14d90d7cc9cd5fa1cb48cf53697ee3bbbf4b76fdfa70b0242500a9"),
SectionIndex: 216,
SectionHead: common.HexToHash("0xae3e551c8d60d06fd411a8e6008e90625d3bb0cbbf664b65d5ed90b318553541"),
CHTRoot: common.HexToHash("0xeea7d2ab3545a37deecc66fc43c9556ae337c3ea1c6893e401428207bdb8e434"),
BloomRoot: common.HexToHash("0xb0d4176d160d67b99a9f963281e52bce0583a566b74b4497fe3ed24ae04004ff"),
}
// TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network.
@ -67,16 +68,17 @@ var (
EIP158Block: big.NewInt(10),
ByzantiumBlock: big.NewInt(1700000),
ConstantinopleBlock: big.NewInt(4230000),
PetersburgBlock: big.NewInt(4939394),
Ethash: new(EthashConfig),
}
// TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
TestnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "testnet",
SectionIndex: 139,
SectionHead: common.HexToHash("0x9fad89a5e3b993c8339b9cf2cbbeb72cd08774ea6b71b105b3dd880420c618f4"),
CHTRoot: common.HexToHash("0xc815833881989c5d2035147e1a79a33d22cbc5313e104ff01e6ab405bd28b317"),
BloomRoot: common.HexToHash("0xd94ee9f3c480858f53ec5d059aebdbb2e8d904702f100875ee59ec5f366e841d"),
SectionIndex: 148,
SectionHead: common.HexToHash("0x4d3181bedb6aa96a6f3efa866c71f7802400d0fb4a6906946c453630d850efc0"),
CHTRoot: common.HexToHash("0x25df2f9d63a5f84b2852988f0f0f7af5a7877da061c11b85c812780b5a27a5ec"),
BloomRoot: common.HexToHash("0x0584834e5222471a06c669d210e302ca602780eaaddd04634fd65471c2a91419"),
}
// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
@ -91,6 +93,7 @@ var (
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(1035301),
ConstantinopleBlock: big.NewInt(3660663),
PetersburgBlock: big.NewInt(9999999), //TODO! Insert Rinkeby block number
Clique: &CliqueConfig{
Period: 15,
Epoch: 30000,
@ -100,10 +103,37 @@ var (
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{
Name: "rinkeby",
SectionIndex: 105,
SectionHead: common.HexToHash("0xec8147d43f936258aaf1b9b9ec91b0a853abf7109f436a23649be809ea43d507"),
CHTRoot: common.HexToHash("0xd92703b444846a3db928e87e450770e5d5cbe193131dc8f7c4cf18b4de925a75"),
BloomRoot: common.HexToHash("0xff45a6f807138a2cde0cea0c209d9ce5ad8e43ccaae5a7c41af801bb72a1ef96"),
SectionIndex: 113,
SectionHead: common.HexToHash("0xb812f3095af3af1cb2de7d7c2086ee807736a7315992c461b0986699185daf77"),
CHTRoot: common.HexToHash("0x5416d0924925eb835987ad3d1f059ecc66778c51959c8246a7a35b22ec5f3109"),
BloomRoot: common.HexToHash("0xcf74ca2c14e843b366561dab4fc64237bf6bb335119cbc97d723f3b501863470"),
}
// GoerliChainConfig contains the chain parameters to run a node on the Görli test network.
GoerliChainConfig = &ChainConfig{
ChainID: big.NewInt(5),
HomesteadBlock: big.NewInt(0),
DAOForkBlock: nil,
DAOForkSupport: true,
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
Clique: &CliqueConfig{
Period: 15,
Epoch: 30000,
},
}
// GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network.
GoerliTrustedCheckpoint = &TrustedCheckpoint{
Name: "goerli",
SectionIndex: 0,
SectionHead: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
CHTRoot: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
BloomRoot: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
}
// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
@ -111,16 +141,16 @@ var (
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
TestRules = TestChainConfig.Rules(new(big.Int))
)
@ -158,6 +188,7 @@ type ChainConfig struct {
ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium)
ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated)
PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople)
EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated)
// Various consensus engines
@ -195,7 +226,7 @@ func (c *ChainConfig) String() string {
default:
engine = "unknown"
}
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Engine: %v}",
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v ConstantinopleFix: %v Engine: %v}",
c.ChainID,
c.HomesteadBlock,
c.DAOForkBlock,
@ -205,6 +236,7 @@ func (c *ChainConfig) String() string {
c.EIP158Block,
c.ByzantiumBlock,
c.ConstantinopleBlock,
c.PetersburgBlock,
engine,
)
}
@ -244,6 +276,13 @@ func (c *ChainConfig) IsConstantinople(num *big.Int) bool {
return isForked(c.ConstantinopleBlock, num)
}
// IsPetersburg returns whether num is either
// - equal to or greater than the PetersburgBlock fork block,
// - OR is nil, and Constantinople is active
func (c *ChainConfig) IsPetersburg(num *big.Int) bool {
return isForked(c.PetersburgBlock, num) || c.PetersburgBlock == nil && isForked(c.ConstantinopleBlock, num)
}
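
The nil case matters: a chain config written before Petersburg existed keeps working without modification. A worked example with a throwaway config (block numbers invented for illustration):

    cfg := &ChainConfig{ConstantinopleBlock: big.NewInt(100)} // PetersburgBlock left nil
    cfg.IsPetersburg(big.NewInt(99))  // false: Constantinople not active yet
    cfg.IsPetersburg(big.NewInt(100)) // true: nil Petersburg tracks Constantinople

    cfg.PetersburgBlock = big.NewInt(200)
    cfg.IsPetersburg(big.NewInt(150)) // false: the explicit fork block now governs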
// IsEWASM returns whether num represents a block number after the EWASM fork
func (c *ChainConfig) IsEWASM(num *big.Int) bool {
return isForked(c.EWASMBlock, num)
@ -314,6 +353,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi
if isForkIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, head) {
return newCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock)
}
if isForkIncompatible(c.PetersburgBlock, newcfg.PetersburgBlock, head) {
return newCompatError("ConstantinopleFix fork block", c.PetersburgBlock, newcfg.PetersburgBlock)
}
if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) {
return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock)
}
@ -381,9 +423,9 @@ func (err *ConfigCompatError) Error() string {
// Rules is a one time interface meaning that it shouldn't be used in between transition
// phases.
type Rules struct {
ChainID *big.Int
IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
IsByzantium, IsConstantinople bool
ChainID *big.Int
IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
IsByzantium, IsConstantinople, IsPetersburg bool
}
// Rules ensures c's ChainID is not nil.
@ -400,5 +442,6 @@ func (c *ChainConfig) Rules(num *big.Int) Rules {
IsEIP158: c.IsEIP158(num),
IsByzantium: c.IsByzantium(num),
IsConstantinople: c.IsConstantinople(num),
IsPetersburg: c.IsPetersburg(num),
}
}


@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 8 // Minor version component of the current release
VersionPatch = 20 // Patch version component of the current release
VersionPatch = 23 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)


@ -36,11 +36,15 @@ import (
)
const (
contentType = "application/json"
maxRequestContentLength = 1024 * 512
)
var nullAddr, _ = net.ResolveTCPAddr("tcp", "127.0.0.1:0")
var (
// https://www.jsonrpc.org/historical/json-rpc-over-http.html#id13
acceptedContentTypes = []string{"application/json", "application/json-rpc", "application/jsonrequest"}
contentType = acceptedContentTypes[0]
nullAddr, _ = net.ResolveTCPAddr("tcp", "127.0.0.1:0")
)
type httpConn struct {
client *http.Client
@ -263,12 +267,21 @@ func validateRequest(r *http.Request) (int, error) {
err := fmt.Errorf("content length too large (%d>%d)", r.ContentLength, maxRequestContentLength)
return http.StatusRequestEntityTooLarge, err
}
mt, _, err := mime.ParseMediaType(r.Header.Get("content-type"))
if r.Method != http.MethodOptions && (err != nil || mt != contentType) {
err := fmt.Errorf("invalid content type, only %s is supported", contentType)
return http.StatusUnsupportedMediaType, err
// Allow OPTIONS (regardless of content-type)
if r.Method == http.MethodOptions {
return 0, nil
}
return 0, nil
// Check content-type
if mt, _, err := mime.ParseMediaType(r.Header.Get("content-type")); err == nil {
for _, accepted := range acceptedContentTypes {
if accepted == mt {
return 0, nil
}
}
}
// Invalid content-type
err := fmt.Errorf("invalid content type, only %s is supported", contentType)
return http.StatusUnsupportedMediaType, err
}
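
Client-side, any of the three accepted media types now passes validation. A minimal check against a local node (endpoint and availability assumed):

    package main

    import (
        "log"
        "net/http"
        "strings"
    )

    func main() {
        body := `{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":1}`
        // "application/json-rpc" was rejected before this change; a 415
        // response here would indicate the old, stricter content-type check.
        resp, err := http.Post("http://127.0.0.1:8545", "application/json-rpc",
            strings.NewReader(body))
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        log.Println("status:", resp.Status)
    }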
func newCorsHandler(srv *Server, allowedOrigins []string) http.Handler {


@ -20,13 +20,31 @@ package rpc
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/log"
)
/*
#include <sys/un.h>
int max_socket_path_size() {
struct sockaddr_un s;
return sizeof(s.sun_path);
}
*/
import "C"
// ipcListen will create a Unix socket on the given endpoint.
func ipcListen(endpoint string) (net.Listener, error) {
if len(endpoint) > int(C.max_socket_path_size()) {
log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters. ", C.max_socket_path_size()),
"endpoint", endpoint)
}
// Ensure the IPC path exists and remove any previous leftover
if err := os.MkdirAll(filepath.Dir(endpoint), 0751); err != nil {
return nil, err


@ -52,6 +52,9 @@ func hexToCompact(hex []byte) []byte {
}
func compactToHex(compact []byte) []byte {
if len(compact) == 0 {
return compact
}
base := keybytesToHex(compact)
// delete terminator flag
if base[0] < 2 {


@ -21,8 +21,8 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
type hasher struct {
@ -57,7 +57,7 @@ var hasherPool = sync.Pool{
New: func() interface{} {
return &hasher{
tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
sha: sha3.NewKeccak256().(keccakState),
sha: sha3.NewLegacyKeccak256().(keccakState),
}
},
}


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// GoMock - a mock framework for Go.
// Package gomock is a mock framework for Go.
//
// Standard usage:
// (1) Define an interface that you wish to mock.
@ -63,8 +63,8 @@ import (
"sync"
)
// A TestReporter is something that can be used to report test failures.
// It is satisfied by the standard library's *testing.T.
// A TestReporter is something that can be used to report test failures. It
// is satisfied by the standard library's *testing.T.
type TestReporter interface {
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
@ -77,14 +77,35 @@ type TestHelper interface {
Helper()
}
// A Controller represents the top-level control of a mock ecosystem.
// It defines the scope and lifetime of mock objects, as well as their expectations.
// It is safe to call Controller's methods from multiple goroutines.
// A Controller represents the top-level control of a mock ecosystem. It
// defines the scope and lifetime of mock objects, as well as their
// expectations. It is safe to call Controller's methods from multiple
// goroutines. Each test should create a new Controller and invoke Finish via
// defer.
//
// func TestFoo(t *testing.T) {
// ctrl := gomock.NewController(t)
// defer ctrl.Finish()
// // ..
// }
//
// func TestBar(t *testing.T) {
// t.Run("Sub-Test-1", func(st *testing.T) {
// ctrl := gomock.NewController(st)
// defer ctrl.Finish()
// // ..
// })
// t.Run("Sub-Test-2", func(st *testing.T) {
// ctrl := gomock.NewController(st)
// defer ctrl.Finish()
// // ..
// })
// }
type Controller struct {
// T should only be called within a generated mock. It is not intended to
// be used in user code and may be changed in future versions. T is the
// TestReporter passed in when creating the Controller via NewController.
// If the TestReporter does not implment a TestHelper it will be wrapped
// If the TestReporter does not implement a TestHelper it will be wrapped
// with a nopTestHelper.
T TestHelper
mu sync.Mutex
@ -92,6 +113,8 @@ type Controller struct {
finished bool
}
// NewController returns a new Controller. It is the preferred way to create a
// Controller.
func NewController(t TestReporter) *Controller {
h, ok := t.(TestHelper)
if !ok {
@ -135,6 +158,7 @@ type nopTestHelper struct {
func (h nopTestHelper) Helper() {}
// RecordCall is called by a mock. It should not be called by user code.
func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call {
ctrl.T.Helper()
@ -148,6 +172,7 @@ func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...
panic("unreachable")
}
// RecordCallWithMethodType is called by a mock. It should not be called by user code.
func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
ctrl.T.Helper()
@ -160,6 +185,7 @@ func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method st
return call
}
// Call is called by a mock. It should not be called by user code.
func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} {
ctrl.T.Helper()
@ -200,6 +226,9 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf
return rets
}
// Finish checks to see if all the methods that were expected to be called
// were called. It should be invoked for each Controller. It is not idempotent
// and therefore can only be invoked once.
func (ctrl *Controller) Finish() {
ctrl.T.Helper()


@ -98,9 +98,30 @@ func (m assignableToTypeOfMatcher) String() string {
}
// Constructors
func Any() Matcher { return anyMatcher{} }
// Any returns a matcher that always matches.
func Any() Matcher { return anyMatcher{} }
// Eq returns a matcher that matches on equality.
//
// Example usage:
// Eq(5).Matches(5) // returns true
// Eq(5).Matches(4) // returns false
func Eq(x interface{}) Matcher { return eqMatcher{x} }
func Nil() Matcher { return nilMatcher{} }
// Nil returns a matcher that matches if the received value is nil.
//
// Example usage:
// var x *bytes.Buffer
// Nil().Matches(x) // returns true
// x = &bytes.Buffer{}
// Nil().Matches(x) // returns false
func Nil() Matcher { return nilMatcher{} }
// Not reverses the results of its given child matcher.
//
// Example usage:
// Not(Eq(5)).Matches(4) // returns true
// Not(Eq(5)).Matches(5) // returns false
func Not(x interface{}) Matcher {
if m, ok := x.(Matcher); ok {
return notMatcher{m}
@ -112,11 +133,9 @@ func Not(x interface{}) Matcher {
// function is assignable to the type of the parameter to this function.
//
// Example usage:
//
// dbMock.EXPECT().
// Insert(gomock.AssignableToTypeOf(&EmployeeRecord{})).
// Return(errors.New("DB error"))
//
// var s fmt.Stringer = &bytes.Buffer{}
// AssignableToTypeOf(s).Matches(time.Second) // returns true
// AssignableToTypeOf(s).Matches(99) // returns false
func AssignableToTypeOf(x interface{}) Matcher {
return assignableToTypeOfMatcher{reflect.TypeOf(x)}
}
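
A hedged sketch of these matchers in an EXPECT chain; `NewMockStore`, its methods, and `useStore` stand in for any mockgen-generated mock and code under test:

    func TestStore(t *testing.T) {
        ctrl := gomock.NewController(t)
        defer ctrl.Finish()

        m := NewMockStore(ctrl) // hypothetical generated constructor
        m.EXPECT().Get(gomock.Eq(5)).Return("five", nil)
        m.EXPECT().Put(gomock.Any(), gomock.AssignableToTypeOf("")).Return(nil)

        useStore(m) // hypothetical code under test
    }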

4
vendor/github.com/lib/pq/.gitignore generated vendored Normal file

@ -0,0 +1,4 @@
.db
*.test
*~
*.swp

86
vendor/github.com/lib/pq/.travis.sh generated vendored Normal file

@ -0,0 +1,86 @@
#!/bin/bash
set -eu
client_configure() {
sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
}
pgdg_repository() {
local sourcelist='sources.list.d/postgresql.list'
curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add -
echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist"
sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update
}
postgresql_configure() {
sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config
local all all trust
hostnossl all pqgossltest 127.0.0.1/32 reject
hostnossl all pqgosslcert 127.0.0.1/32 reject
hostssl all pqgossltest 127.0.0.1/32 trust
hostssl all pqgosslcert 127.0.0.1/32 cert
host all all 127.0.0.1/32 trust
hostnossl all pqgossltest ::1/128 reject
hostnossl all pqgosslcert ::1/128 reject
hostssl all pqgossltest ::1/128 trust
hostssl all pqgosslcert ::1/128 cert
host all all ::1/128 trust
config
xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates
certs/root.crt
certs/server.crt
certs/server.key
certificates
sort -VCu <<-versions ||
$PGVERSION
9.2
versions
sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config
ssl_ca_file = 'root.crt'
ssl_cert_file = 'server.crt'
ssl_key_file = 'server.key'
config
echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null
sudo service postgresql restart
}
postgresql_install() {
xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages
postgresql-$PGVERSION
postgresql-server-dev-$PGVERSION
postgresql-contrib-$PGVERSION
packages
}
postgresql_uninstall() {
sudo service postgresql stop
xargs sudo apt-get -y --purge remove <<-packages
libpq-dev
libpq5
postgresql
postgresql-client-common
postgresql-common
packages
sudo rm -rf /var/lib/postgresql
}
megacheck_install() {
# Lock megacheck version at $MEGACHECK_VERSION to prevent spontaneous
# new error messages in old code.
go get -d honnef.co/go/tools/...
git -C $GOPATH/src/honnef.co/go/tools/ checkout $MEGACHECK_VERSION
go install honnef.co/go/tools/cmd/megacheck
megacheck --version
}
golint_install() {
go get github.com/golang/lint/golint
}
$1

50
vendor/github.com/lib/pq/.travis.yml generated vendored Normal file

@ -0,0 +1,50 @@
language: go
go:
- 1.8.x
- 1.9.x
- 1.10.x
- master
sudo: true
env:
global:
- PGUSER=postgres
- PQGOSSLTESTS=1
- PQSSLCERTTEST_PATH=$PWD/certs
- PGHOST=127.0.0.1
- MEGACHECK_VERSION=2017.2.2
matrix:
- PGVERSION=10
- PGVERSION=9.6
- PGVERSION=9.5
- PGVERSION=9.4
- PGVERSION=9.3
- PGVERSION=9.2
- PGVERSION=9.1
- PGVERSION=9.0
before_install:
- ./.travis.sh postgresql_uninstall
- ./.travis.sh pgdg_repository
- ./.travis.sh postgresql_install
- ./.travis.sh postgresql_configure
- ./.travis.sh client_configure
- ./.travis.sh megacheck_install
- ./.travis.sh golint_install
- go get golang.org/x/tools/cmd/goimports
before_script:
- createdb pqgotest
- createuser -DRS pqgossltest
- createuser -DRS pqgosslcert
script:
- >
goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
- go vet ./...
- megacheck -go 1.8 ./...
- golint ./...
- PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
- PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...

29
vendor/github.com/lib/pq/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,29 @@
## Contributing to pq
`pq` has a backlog of pull requests, but contributions are still very
much welcome. You can help with patch review, submitting bug reports,
or adding new functionality. There is no formal style guide, but
please conform to the style of existing code and general Go formatting
conventions when submitting patches.
### Patch review
Help review existing open pull requests by commenting on the code or
proposed functionality.
### Bug reports
We appreciate any bug reports, but especially ones with self-contained
(doesn't depend on code outside of pq), minimal (can't be simplified
further) test cases. It's especially helpful if you can submit a pull
request with just the failing test case (you'll probably want to
pattern it after the tests in
[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)).
### New functionality
There are a number of pending patches for new functionality, so
additional feature patches will take a while to merge. Still, patches
are generally reviewed based on usefulness and complexity in addition
to time-in-queue, so if you have a knockout idea, take a shot. Feel
free to open an issue discussing your proposed patch beforehand.

8
vendor/github.com/lib/pq/LICENSE.md generated vendored Normal file

@ -0,0 +1,8 @@
Copyright (c) 2011-2013, 'pq' Contributors
Portions Copyright (C) 2011 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

95
vendor/github.com/lib/pq/README.md generated vendored Normal file

@ -0,0 +1,95 @@
# pq - A pure Go postgres driver for Go's database/sql package
[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq)
[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
## Install
go get github.com/lib/pq
## Docs
For detailed documentation and basic usage examples, please see the package
documentation at <http://godoc.org/github.com/lib/pq>.
## Tests
`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
## Features
* SSL
* Handles bad connections for `database/sql`
* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
* Scan binary blobs correctly (i.e. `bytea`)
* Package for `hstore` support
* COPY FROM support
* pq.ParseURL for converting urls to connection strings for sql.Open.
* Many libpq compatible environment variables
* Unix socket support
* Notifications: `LISTEN`/`NOTIFY`
* pgpass support
## Future / Things you can help with
* Better COPY FROM / COPY TO (see discussion in #181)
## Thank you (alphabetical)
Some of these contributors are from the original library `bmizerany/pq.go` whose
code still exists in here.
* Andy Balholm (andybalholm)
* Ben Berkert (benburkert)
* Benjamin Heatwole (bheatwole)
* Bill Mill (llimllib)
* Bjørn Madsen (aeons)
* Blake Gentry (bgentry)
* Brad Fitzpatrick (bradfitz)
* Charlie Melbye (cmelbye)
* Chris Bandy (cbandy)
* Chris Gilling (cgilling)
* Chris Walsh (cwds)
* Dan Sosedoff (sosedoff)
* Daniel Farina (fdr)
* Eric Chlebek (echlebek)
* Eric Garrido (minusnine)
* Eric Urban (hydrogen18)
* Everyone at The Go Team
* Evan Shaw (edsrzf)
* Ewan Chou (coocood)
* Fazal Majid (fazalmajid)
* Federico Romero (federomero)
* Fumin (fumin)
* Gary Burd (garyburd)
* Heroku (heroku)
* James Pozdena (jpoz)
* Jason McVetta (jmcvetta)
* Jeremy Jay (pbnjay)
* Joakim Sernbrant (serbaut)
* John Gallagher (jgallagher)
* Jonathan Rudenberg (titanous)
* Joël Stemmer (jstemmer)
* Kamil Kisiel (kisielk)
* Kelly Dunn (kellydunn)
* Keith Rarick (kr)
* Kir Shatrov (kirs)
* Lann Martin (lann)
* Maciek Sakrejda (uhoh-itsmaciek)
* Marc Brinkmann (mbr)
* Marko Tiikkaja (johto)
* Matt Newberry (MattNewberry)
* Matt Robenolt (mattrobenolt)
* Martin Olsen (martinolsen)
* Mike Lewis (mikelikespie)
* Nicolas Patry (Narsil)
* Oliver Tonnhofer (olt)
* Patrick Hayes (phayes)
* Paul Hammond (paulhammond)
* Ryan Smith (ryandotsmith)
* Samuel Stauffer (samuel)
* Timothée Peignier (cyberdelia)
* Travis Cline (tmc)
* TruongSinh Tran-Nguyen (truongsinh)
* Yaismel Miranda (ympons)
* notedit (notedit)

33
vendor/github.com/lib/pq/TESTS.md generated vendored Normal file

@ -0,0 +1,33 @@
# Tests
## Running Tests
`go test` is used for testing. A running PostgreSQL
server is required, with the ability to log in. The
database used for testing is "pqgotest" on "localhost",
but these can be overridden using [environment
variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).
Example:
PGHOST=/run/postgresql go test
## Benchmarks
A benchmark suite can be run as part of the tests:
go test -bench .
## Example setup (Docker)
Run a postgres container:
```
docker run --expose 5432:5432 postgres
```
Run tests:
```
PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
```

756
vendor/github.com/lib/pq/array.go generated vendored Normal file

@ -0,0 +1,756 @@
package pq
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)
var typeByteSlice = reflect.TypeOf([]byte{})
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
// Array returns the optimal driver.Valuer and sql.Scanner for an array or
// slice of any dimension.
//
// For example:
// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
//
// var x []sql.NullInt64
// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x))
//
// Scanning multi-dimensional arrays is not supported. Arrays where the lower
// bound is not one (such as `[0:0]={1}') are not supported.
func Array(a interface{}) interface {
driver.Valuer
sql.Scanner
} {
switch a := a.(type) {
case []bool:
return (*BoolArray)(&a)
case []float64:
return (*Float64Array)(&a)
case []int64:
return (*Int64Array)(&a)
case []string:
return (*StringArray)(&a)
case *[]bool:
return (*BoolArray)(a)
case *[]float64:
return (*Float64Array)(a)
case *[]int64:
return (*Int64Array)(a)
case *[]string:
return (*StringArray)(a)
}
return GenericArray{a}
}
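
A round-trip sketch of pq.Array with database/sql, assuming a reachable PostgreSQL server and a table t(id int, tags text[]):

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        "github.com/lib/pq" // registers the "postgres" driver, provides pq.Array
    )

    func main() {
        db, err := sql.Open("postgres", "dbname=pqgotest sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Write: the []string is encoded as a one-dimensional text[] literal.
        if _, err := db.Exec(`INSERT INTO t (id, tags) VALUES ($1, $2)`,
            1, pq.Array([]string{"go", "postgres"})); err != nil {
            log.Fatal(err)
        }

        // Read: scanning through pq.Array decodes back into the slice.
        var tags []string
        if err := db.QueryRow(`SELECT tags FROM t WHERE id = $1`, 1).
            Scan(pq.Array(&tags)); err != nil {
            log.Fatal(err)
        }
        fmt.Println(tags)
    }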
// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
// to override the array delimiter used by GenericArray.
type ArrayDelimiter interface {
// ArrayDelimiter returns the delimiter character(s) for this element's type.
ArrayDelimiter() string
}
// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
type BoolArray []bool
// Scan implements the sql.Scanner interface.
func (a *BoolArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
}
func (a *BoolArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(BoolArray, len(elems))
for i, v := range elems {
if len(v) != 1 {
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
switch v[0] {
case 't':
b[i] = true
case 'f':
b[i] = false
default:
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a BoolArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be exactly two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1+2*n)
for i := 0; i < n; i++ {
b[2*i] = ','
if a[i] {
b[1+2*i] = 't'
} else {
b[1+2*i] = 'f'
}
}
b[0] = '{'
b[2*n] = '}'
return string(b), nil
}
return "{}", nil
}
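
A worked example of the sizing comment above: for N = 3 the buffer is exactly 1 + 2N = 7 bytes, i.e. two braces, three value bytes, and two commas:

    v, _ := pq.BoolArray{true, false, true}.Value()
    fmt.Println(v) // prints {t,f,t}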
// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
type ByteaArray [][]byte
// Scan implements the sql.Scanner interface.
func (a *ByteaArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
}
func (a *ByteaArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(ByteaArray, len(elems))
for i, v := range elems {
b[i], err = parseBytea(v)
if err != nil {
return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface. It uses the "hex" format which
// is only supported on PostgreSQL 9.0 or newer.
func (a ByteaArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
size := 1 + 6*n
for _, x := range a {
size += hex.EncodedLen(len(x))
}
b := make([]byte, size)
for i, s := 0, b; i < n; i++ {
o := copy(s, `,"\\x`)
o += hex.Encode(s[o:], a[i])
s[o] = '"'
s = s[o+1:]
}
b[0] = '{'
b[size-1] = '}'
return string(b), nil
}
return "{}", nil
}
// Float64Array represents a one-dimensional array of the PostgreSQL double
// precision type.
type Float64Array []float64
// Scan implements the sql.Scanner interface.
func (a *Float64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
}
func (a *Float64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Float64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Float64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
// an array or slice of any dimension.
type GenericArray struct{ A interface{} }
func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
var assign func([]byte, reflect.Value) error
var del = ","
// TODO calculate the assign function for other types
// TODO repeat this section on the element type of arrays or slices (multidimensional)
{
if reflect.PtrTo(rt).Implements(typeSQLScanner) {
// dest is always addressable because it is an element of a slice.
assign = func(src []byte, dest reflect.Value) (err error) {
ss := dest.Addr().Interface().(sql.Scanner)
if src == nil {
err = ss.Scan(nil)
} else {
err = ss.Scan(src)
}
return
}
goto FoundType
}
assign = func([]byte, reflect.Value) error {
return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
}
}
FoundType:
if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
return rt, assign, del
}
// Scan implements the sql.Scanner interface.
func (a GenericArray) Scan(src interface{}) error {
dpv := reflect.ValueOf(a.A)
switch {
case dpv.Kind() != reflect.Ptr:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
case dpv.IsNil():
return fmt.Errorf("pq: destination %T is nil", a.A)
}
dv := dpv.Elem()
switch dv.Kind() {
case reflect.Slice:
case reflect.Array:
default:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
}
switch src := src.(type) {
case []byte:
return a.scanBytes(src, dv)
case string:
return a.scanBytes([]byte(src), dv)
case nil:
if dv.Kind() == reflect.Slice {
dv.Set(reflect.Zero(dv.Type()))
return nil
}
}
return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
}
func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
dims, elems, err := parseArray(src, []byte(del))
if err != nil {
return err
}
// TODO allow multidimensional
if len(dims) > 1 {
return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
strings.Replace(fmt.Sprint(dims), " ", "][", -1))
}
// Treat a zero-dimensional array like an array with a single dimension of zero.
if len(dims) == 0 {
dims = append(dims, 0)
}
for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
switch rt.Kind() {
case reflect.Slice:
case reflect.Array:
if rt.Len() != dims[i] {
return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
}
default:
// TODO handle multidimensional
}
}
values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
for i, e := range elems {
if err := assign(e, values.Index(i)); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
// TODO handle multidimensional
switch dv.Kind() {
case reflect.Slice:
dv.Set(values.Slice(0, dims[0]))
case reflect.Array:
for i := 0; i < dims[0]; i++ {
dv.Index(i).Set(values.Index(i))
}
}
return nil
}
// Value implements the driver.Valuer interface.
func (a GenericArray) Value() (driver.Value, error) {
if a.A == nil {
return nil, nil
}
rv := reflect.ValueOf(a.A)
switch rv.Kind() {
case reflect.Slice:
if rv.IsNil() {
return nil, nil
}
case reflect.Array:
default:
return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
}
if n := rv.Len(); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 0, 1+2*n)
b, _, err := appendArray(b, rv, n)
return string(b), err
}
return "{}", nil
}
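
A minimal sketch of GenericArray in use, assuming a reachable Postgres database and hypothetical connection parameters. Because the Scan path above only supports element types that implement sql.Scanner, the destination slice uses sql.NullString rather than plain string, which also preserves NULL elements:

package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	// Connection parameters are hypothetical.
	db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Scan needs a pointer to a slice whose element type implements
	// sql.Scanner; sql.NullString qualifies and round-trips NULLs.
	var tags []sql.NullString
	if err := db.QueryRow(`SELECT ARRAY['a', NULL, 'c']`).Scan(pq.GenericArray{A: &tags}); err != nil {
		log.Fatal(err)
	}
	log.Println(tags)
}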
// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
type Int64Array []int64
// Scan implements the sql.Scanner interface.
func (a *Int64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
}
func (a *Int64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Int64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Int64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendInt(b, a[0], 10)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendInt(b, a[i], 10)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// StringArray represents a one-dimensional array of the PostgreSQL character types.
type StringArray []string
// Scan implements the sql.Scanner interface.
func (a *StringArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to StringArray", src)
}
func (a *StringArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "StringArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(StringArray, len(elems))
for i, v := range elems {
if b[i] = string(v); v == nil {
return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a StringArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+3*n)
b[0] = '{'
b = appendArrayQuotedBytes(b, []byte(a[0]))
for i := 1; i < n; i++ {
b = append(b, ',')
b = appendArrayQuotedBytes(b, []byte(a[i]))
}
return string(append(b, '}')), nil
}
return "{}", nil
}
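
Both typed arrays work in either direction; a minimal round-trip sketch, again assuming a reachable database with hypothetical connection parameters. Arguments are encoded through Value (e.g. "{1,2,3}") and results decoded through Scan:

package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var ids pq.Int64Array
	var names pq.StringArray
	err = db.QueryRow(`SELECT $1::bigint[], $2::text[]`,
		pq.Int64Array{1, 2, 3}, pq.StringArray{"ann", "bob"},
	).Scan(&ids, &names)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ids, names) // [1 2 3] [ann bob]
}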
// appendArray appends rv to the buffer, returning the extended buffer and
// the delimiter used between elements.
//
// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
var del string
var err error
b = append(b, '{')
if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
return b, del, err
}
for i := 1; i < n; i++ {
b = append(b, del...)
if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
return b, del, err
}
}
return append(b, '}'), del, nil
}
// appendArrayElement appends rv to the buffer, returning the extended buffer
// and the delimiter to use before the next element.
//
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
// using driver.DefaultParameterConverter and the resulting []byte or string
// is double-quoted.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
if n := rv.Len(); n > 0 {
return appendArray(b, rv, n)
}
return b, "", nil
}
}
var del = ","
var err error
var iv interface{} = rv.Interface()
if ad, ok := iv.(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
return b, del, err
}
switch v := iv.(type) {
case nil:
return append(b, "NULL"...), del, nil
case []byte:
return appendArrayQuotedBytes(b, v), del, nil
case string:
return appendArrayQuotedBytes(b, []byte(v)), del, nil
}
b, err = appendValue(b, iv)
return b, del, err
}
func appendArrayQuotedBytes(b, v []byte) []byte {
b = append(b, '"')
for {
i := bytes.IndexAny(v, `"\`)
if i < 0 {
b = append(b, v...)
break
}
if i > 0 {
b = append(b, v[:i]...)
}
b = append(b, '\\', v[i])
v = v[i+1:]
}
return append(b, '"')
}
func appendValue(b []byte, v driver.Value) ([]byte, error) {
return append(b, encode(nil, v, 0)...), nil
}
// parseArray extracts the dimensions and elements of an array represented in
// text format. Only representations emitted by the backend are supported.
// Notably, whitespace around brackets and delimiters is significant, and NULL
// is case-sensitive.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
var depth, i int
if len(src) < 1 || src[0] != '{' {
return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
}
Open:
for i < len(src) {
switch src[i] {
case '{':
depth++
i++
case '}':
elems = make([][]byte, 0)
goto Close
default:
break Open
}
}
dims = make([]int, i)
Element:
for i < len(src) {
switch src[i] {
case '{':
if depth == len(dims) {
break Element
}
depth++
dims[depth-1] = 0
i++
case '"':
var elem = []byte{}
var escape bool
for i++; i < len(src); i++ {
if escape {
elem = append(elem, src[i])
escape = false
} else {
switch src[i] {
default:
elem = append(elem, src[i])
case '\\':
escape = true
case '"':
elems = append(elems, elem)
i++
break Element
}
}
}
default:
for start := i; i < len(src); i++ {
if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
elem := src[start:i]
if len(elem) == 0 {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
if bytes.Equal(elem, []byte("NULL")) {
elem = nil
}
elems = append(elems, elem)
break Element
}
}
}
}
for i < len(src) {
if bytes.HasPrefix(src[i:], del) && depth > 0 {
dims[depth-1]++
i += len(del)
goto Element
} else if src[i] == '}' && depth > 0 {
dims[depth-1]++
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
Close:
for i < len(src) {
if src[i] == '}' && depth > 0 {
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
if depth > 0 {
err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
}
if err == nil {
for _, d := range dims {
if (len(elems) % d) != 0 {
err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
}
}
}
return
}
func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
dims, elems, err := parseArray(src, del)
if err != nil {
return nil, err
}
if len(dims) > 1 {
return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
}
return elems, err
}
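
To make the parsed shape concrete, a small sketch of what parseArray returns for a one-dimensional literal; since parseArray is unexported, this would have to live inside package pq (for example in a test). Note that an unquoted NULL becomes a nil element:

package pq

import "fmt"

func demoParseArray() {
	dims, elems, err := parseArray([]byte(`{1,NULL,"a b"}`), []byte{','})
	fmt.Println(dims, err)        // [3] <nil>
	fmt.Println(string(elems[0])) // 1
	fmt.Println(elems[1] == nil)  // true (unquoted NULL)
	fmt.Println(string(elems[2])) // a b
}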

91
vendor/github.com/lib/pq/buf.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
package pq
import (
"bytes"
"encoding/binary"
"github.com/lib/pq/oid"
)
type readBuf []byte
func (b *readBuf) int32() (n int) {
n = int(int32(binary.BigEndian.Uint32(*b)))
*b = (*b)[4:]
return
}
func (b *readBuf) oid() (n oid.Oid) {
n = oid.Oid(binary.BigEndian.Uint32(*b))
*b = (*b)[4:]
return
}
// N.B: this is actually an unsigned 16-bit integer, unlike int32
func (b *readBuf) int16() (n int) {
n = int(binary.BigEndian.Uint16(*b))
*b = (*b)[2:]
return
}
func (b *readBuf) string() string {
i := bytes.IndexByte(*b, 0)
if i < 0 {
errorf("invalid message format; expected string terminator")
}
s := (*b)[:i]
*b = (*b)[i+1:]
return string(s)
}
func (b *readBuf) next(n int) (v []byte) {
v = (*b)[:n]
*b = (*b)[n:]
return
}
func (b *readBuf) byte() byte {
return b.next(1)[0]
}
type writeBuf struct {
buf []byte
pos int
}
func (b *writeBuf) int32(n int) {
x := make([]byte, 4)
binary.BigEndian.PutUint32(x, uint32(n))
b.buf = append(b.buf, x...)
}
func (b *writeBuf) int16(n int) {
x := make([]byte, 2)
binary.BigEndian.PutUint16(x, uint16(n))
b.buf = append(b.buf, x...)
}
func (b *writeBuf) string(s string) {
b.buf = append(b.buf, (s + "\000")...)
}
func (b *writeBuf) byte(c byte) {
b.buf = append(b.buf, c)
}
func (b *writeBuf) bytes(v []byte) {
b.buf = append(b.buf, v...)
}
func (b *writeBuf) wrap() []byte {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
return b.buf
}
func (b *writeBuf) next(c byte) {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
b.pos = len(b.buf) + 1
b.buf = append(b.buf, c, 0, 0, 0, 0)
}
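
A sketch of how these buffers frame a protocol message. The writeBuf literal below assumes the construction used by conn.writeBuf elsewhere in this diff (a type byte followed by four zero length bytes, with pos pointing past the type byte); wrap then back-patches the big-endian length, which counts the length field itself but not the type byte. Being unexported, this would live inside package pq:

package pq

// demoQueryMessage frames a simple-protocol Query ('Q') message: type byte,
// four-byte big-endian length (patched by wrap), NUL-terminated query text.
func demoQueryMessage(q string) []byte {
	b := &writeBuf{buf: []byte{'Q', 0, 0, 0, 0}, pos: 1}
	b.string(q)
	return b.wrap()
}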

1854
vendor/github.com/lib/pq/conn.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

131
vendor/github.com/lib/pq/conn_go18.go generated vendored Normal file
View File

@ -0,0 +1,131 @@
// +build go1.8
package pq
import (
"context"
"database/sql"
"database/sql/driver"
"fmt"
"io"
"io/ioutil"
)
// Implement the "QueryerContext" interface
func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
finish := cn.watchCancel(ctx)
r, err := cn.query(query, list)
if err != nil {
if finish != nil {
finish()
}
return nil, err
}
r.finish = finish
return r, nil
}
// Implement the "ExecerContext" interface
func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
if finish := cn.watchCancel(ctx); finish != nil {
defer finish()
}
return cn.Exec(query, list)
}
// Implement the "ConnBeginTx" interface
func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
var mode string
switch sql.IsolationLevel(opts.Isolation) {
case sql.LevelDefault:
// Don't touch mode: use the server's default
case sql.LevelReadUncommitted:
mode = " ISOLATION LEVEL READ UNCOMMITTED"
case sql.LevelReadCommitted:
mode = " ISOLATION LEVEL READ COMMITTED"
case sql.LevelRepeatableRead:
mode = " ISOLATION LEVEL REPEATABLE READ"
case sql.LevelSerializable:
mode = " ISOLATION LEVEL SERIALIZABLE"
default:
return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation)
}
if opts.ReadOnly {
mode += " READ ONLY"
} else {
mode += " READ WRITE"
}
tx, err := cn.begin(mode)
if err != nil {
return nil, err
}
cn.txnFinish = cn.watchCancel(ctx)
return tx, nil
}
func (cn *conn) watchCancel(ctx context.Context) func() {
if done := ctx.Done(); done != nil {
finished := make(chan struct{})
go func() {
select {
case <-done:
_ = cn.cancel()
finished <- struct{}{}
case <-finished:
}
}()
return func() {
select {
case <-finished:
case finished <- struct{}{}:
}
}
}
return nil
}
func (cn *conn) cancel() error {
c, err := dial(cn.dialer, cn.opts)
if err != nil {
return err
}
defer c.Close()
{
can := conn{
c: c,
}
err = can.ssl(cn.opts)
if err != nil {
return err
}
w := can.writeBuf(0)
w.int32(80877102) // cancel request code
w.int32(cn.processID)
w.int32(cn.secretKey)
if err := can.sendStartupPacket(w); err != nil {
return err
}
}
// Read until EOF to ensure that the server received the cancel.
{
_, err := io.Copy(ioutil.Discard, c)
return err
}
}
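
Seen from the database/sql side, the plumbing above is what makes context cancellation work: when the context fires, the goroutine installed by watchCancel calls cancel, which dials a second connection and sends the cancel request code together with the backend's process ID and secret key. A hedged sketch (the query and timeout are illustrative; the pq driver is assumed to be registered by the caller):

package example

import (
	"context"
	"database/sql"
	"time"
)

// queryWithTimeout cancels the statement server-side if it outlives ctx.
func queryWithTimeout(db *sql.DB) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	rows, err := db.QueryContext(ctx, `SELECT pg_sleep(10)`)
	if err != nil {
		return err // typically a cancellation error after ~2s
	}
	defer rows.Close()
	return rows.Err()
}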

43
vendor/github.com/lib/pq/connector.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
// +build go1.10
package pq
import (
"context"
"database/sql/driver"
)
// Connector represents a fixed configuration for the pq driver with a given
// name. Connector satisfies the database/sql/driver Connector interface and
// can be used to create any number of DB Conn's via the database/sql OpenDB
// function.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
type connector struct {
name string
}
// Connect returns a connection to the database using the fixed configuration
// of this Connector. Context is not used.
func (c *connector) Connect(_ context.Context) (driver.Conn, error) {
return (&Driver{}).Open(c.name)
}
// Driver returns the underlying driver of this Connector.
func (c *connector) Driver() driver.Driver {
return &Driver{}
}
var _ driver.Connector = &connector{}
// NewConnector returns a connector for the pq driver in a fixed configuration
// with the given name. The returned connector can be used to create any number
// of equivalent Conn's. The returned connector is intended to be used with
// database/sql.OpenDB.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
func NewConnector(name string) (driver.Connector, error) {
return &connector{name: name}, nil
}
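
A minimal sketch of the connector in use, assuming Go 1.10+ and a hypothetical connection string; sql.OpenDB bypasses the global driver-name registry entirely:

package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	connector, err := pq.NewConnector("user=pqgotest dbname=pqgotest sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	db := sql.OpenDB(connector)
	defer db.Close()
	log.Println(db.Ping())
}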

282
vendor/github.com/lib/pq/copy.go generated vendored Normal file
View File

@ -0,0 +1,282 @@
package pq
import (
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"sync"
)
var (
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
errCopyInProgress = errors.New("pq: COPY in progress")
)
// CopyIn creates a COPY FROM statement which can be prepared with
// Tx.Prepare(). The target table should be visible in search_path.
func CopyIn(table string, columns ...string) string {
stmt := "COPY " + QuoteIdentifier(table) + " ("
for i, col := range columns {
if i != 0 {
stmt += ", "
}
stmt += QuoteIdentifier(col)
}
stmt += ") FROM STDIN"
return stmt
}
// CopyInSchema creates a COPY FROM statement which can be prepared with
// Tx.Prepare().
func CopyInSchema(schema, table string, columns ...string) string {
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
for i, col := range columns {
if i != 0 {
stmt += ", "
}
stmt += QuoteIdentifier(col)
}
stmt += ") FROM STDIN"
return stmt
}
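
Both helpers only assemble statement text; preparing the result inside a transaction is what actually switches the connection into COPY mode (a full usage example appears in doc.go later in this diff). The generated SQL is deterministic:

package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	fmt.Println(pq.CopyIn("users", "name", "age"))
	// COPY "users" ("name", "age") FROM STDIN
	fmt.Println(pq.CopyInSchema("app", "users", "name", "age"))
	// COPY "app"."users" ("name", "age") FROM STDIN
}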
type copyin struct {
cn *conn
buffer []byte
rowData chan []byte
done chan bool
closed bool
sync.Mutex // guards err
err error
}
const ciBufferSize = 64 * 1024
// flush buffer before the buffer is filled up and needs reallocation
const ciBufferFlushSize = 63 * 1024
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
if !cn.isInTransaction() {
return nil, errCopyNotSupportedOutsideTxn
}
ci := &copyin{
cn: cn,
buffer: make([]byte, 0, ciBufferSize),
rowData: make(chan []byte),
done: make(chan bool, 1),
}
// add CopyData identifier + 4 bytes for message length
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
b := cn.writeBuf('Q')
b.string(q)
cn.send(b)
awaitCopyInResponse:
for {
t, r := cn.recv1()
switch t {
case 'G':
if r.byte() != 0 {
err = errBinaryCopyNotSupported
break awaitCopyInResponse
}
go ci.resploop()
return ci, nil
case 'H':
err = errCopyToNotSupported
break awaitCopyInResponse
case 'E':
err = parseError(r)
case 'Z':
if err == nil {
ci.setBad()
errorf("unexpected ReadyForQuery in response to COPY")
}
cn.processReadyForQuery(r)
return nil, err
default:
ci.setBad()
errorf("unknown response for copy query: %q", t)
}
}
// something went wrong, abort COPY before we return
b = cn.writeBuf('f')
b.string(err.Error())
cn.send(b)
for {
t, r := cn.recv1()
switch t {
case 'c', 'C', 'E':
case 'Z':
// correctly aborted, we're done
cn.processReadyForQuery(r)
return nil, err
default:
ci.setBad()
errorf("unknown response for CopyFail: %q", t)
}
}
}
func (ci *copyin) flush(buf []byte) {
// set message length (without message identifier)
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
_, err := ci.cn.c.Write(buf)
if err != nil {
panic(err)
}
}
func (ci *copyin) resploop() {
for {
var r readBuf
t, err := ci.cn.recvMessage(&r)
if err != nil {
ci.setBad()
ci.setError(err)
ci.done <- true
return
}
switch t {
case 'C':
// complete
case 'N':
// NoticeResponse
case 'Z':
ci.cn.processReadyForQuery(&r)
ci.done <- true
return
case 'E':
err := parseError(&r)
ci.setError(err)
default:
ci.setBad()
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
ci.done <- true
return
}
}
}
func (ci *copyin) setBad() {
ci.Lock()
ci.cn.bad = true
ci.Unlock()
}
func (ci *copyin) isBad() bool {
ci.Lock()
b := ci.cn.bad
ci.Unlock()
return b
}
func (ci *copyin) isErrorSet() bool {
ci.Lock()
isSet := (ci.err != nil)
ci.Unlock()
return isSet
}
// setError() sets ci.err if one has not been set already. Caller must not be
// holding ci.Mutex.
func (ci *copyin) setError(err error) {
ci.Lock()
if ci.err == nil {
ci.err = err
}
ci.Unlock()
}
func (ci *copyin) NumInput() int {
return -1
}
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
return nil, ErrNotSupported
}
// Exec inserts values into the COPY stream. The insert is asynchronous
// and Exec can return errors from previous Exec calls to the same
// COPY stmt.
//
// You need to call Exec(nil) to sync the COPY stream and to get any
// errors from pending data, since Stmt.Close() doesn't return errors
// to the user.
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
if ci.closed {
return nil, errCopyInClosed
}
if ci.isBad() {
return nil, driver.ErrBadConn
}
defer ci.cn.errRecover(&err)
if ci.isErrorSet() {
return nil, ci.err
}
if len(v) == 0 {
return nil, ci.Close()
}
numValues := len(v)
for i, value := range v {
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
if i < numValues-1 {
ci.buffer = append(ci.buffer, '\t')
}
}
ci.buffer = append(ci.buffer, '\n')
if len(ci.buffer) > ciBufferFlushSize {
ci.flush(ci.buffer)
// reset buffer, keep bytes for message identifier and length
ci.buffer = ci.buffer[:5]
}
return driver.RowsAffected(0), nil
}
func (ci *copyin) Close() (err error) {
if ci.closed { // Don't do anything, we're already closed
return nil
}
ci.closed = true
if ci.isBad() {
return driver.ErrBadConn
}
defer ci.cn.errRecover(&err)
if len(ci.buffer) > 0 {
ci.flush(ci.buffer)
}
// Avoid touching the scratch buffer as resploop could be using it.
err = ci.cn.sendSimpleMessage('c')
if err != nil {
return err
}
<-ci.done
ci.cn.inCopy = false
if ci.isErrorSet() {
err = ci.err
return err
}
return nil
}

245
vendor/github.com/lib/pq/doc.go generated vendored Normal file
View File

@ -0,0 +1,245 @@
/*
Package pq is a pure Go Postgres driver for the database/sql package.
In most cases clients will use the database/sql package instead of
using this package directly. For example:
import (
"database/sql"
_ "github.com/lib/pq"
)
func main() {
connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full"
db, err := sql.Open("postgres", connStr)
if err != nil {
log.Fatal(err)
}
age := 21
rows, err := db.Query("SELECT name FROM users WHERE age = $1", age)
}
You can also connect to a database using a URL. For example:
connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full"
db, err := sql.Open("postgres", connStr)
Connection String Parameters
Similarly to libpq, when establishing a connection using pq you are expected to
supply a connection string containing zero or more parameters.
A subset of the connection parameters supported by libpq are also supported by pq.
Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem)
directly in the connection string. This is different from libpq, which does not allow
run-time parameters in the connection string, instead requiring you to supply
them in the options parameter.
For compatibility with libpq, the following special connection parameters are
supported:
* dbname - The name of the database to connect to
* user - The user to sign in as
* password - The user's password
* host - The host to connect to. Values that start with / are for unix
domain sockets. (default is localhost)
* port - The port to connect to. (default is 5432)
* sslmode - Whether or not to use SSL (default is require, this is not
the default for libpq)
* fallback_application_name - An application_name to fall back to if one isn't provided.
* connect_timeout - Maximum wait for connection, in seconds. Zero or
not specified means wait indefinitely.
* sslcert - Cert file location. The file must contain PEM encoded data.
* sslkey - Key file location. The file must contain PEM encoded data.
* sslrootcert - The location of the root certificate file. The file
must contain PEM encoded data.
Valid values for sslmode are:
* disable - No SSL
* require - Always SSL (skip verification)
* verify-ca - Always SSL (verify that the certificate presented by the
server was signed by a trusted CA)
* verify-full - Always SSL (verify that the certificate presented by
the server was signed by a trusted CA and the server host name
matches the one in the certificate)
See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
for more information about connection string parameters.
Use single quotes for values that contain whitespace:
"user=pqgotest password='with spaces'"
A backslash will escape the next character in values:
"user=space\ man password='it\'s valid'"
Note that the connection parameter client_encoding (which sets the
text encoding for the connection) may be set but must be "UTF8",
matching with the same rules as Postgres. It is an error to provide
any other value.
In addition to the parameters listed above, any run-time parameter that can be
set at backend start time can be set in the connection string. For more
information, see
http://www.postgresql.org/docs/current/static/runtime-config.html.
Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html
supported by libpq are also supported by pq. If any of the environment
variables not supported by pq are set, pq will panic during connection
establishment. Environment variables have a lower precedence than explicitly
provided connection parameters.
The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
is supported, but on Windows PGPASSFILE must be specified explicitly.
Queries
database/sql does not dictate any specific format for parameter
markers in query strings, and pq uses the Postgres-native ordinal markers,
as shown above. The same marker can be reused for the same parameter:
rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
OR age BETWEEN $2 AND $2 + 3`, "orange", 64)
pq does not support the LastInsertId() method of the Result type in database/sql.
To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
RETURNING clause with a standard Query or QueryRow call:
var userid int
err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)
For more details on RETURNING, see the Postgres documentation:
http://www.postgresql.org/docs/current/static/sql-insert.html
http://www.postgresql.org/docs/current/static/sql-update.html
http://www.postgresql.org/docs/current/static/sql-delete.html
For additional instructions on querying see the documentation for the database/sql package.
Data Types
Parameters pass through driver.DefaultParameterConverter before they are handled
by this package. When the binary_parameters connection option is enabled,
[]byte values are sent directly to the backend as data in binary format.
This package returns the following types for values from the PostgreSQL backend:
- integer types smallint, integer, and bigint are returned as int64
- floating-point types real and double precision are returned as float64
- character types char, varchar, and text are returned as string
- temporal types date, time, timetz, timestamp, and timestamptz are
returned as time.Time
- the boolean type is returned as bool
- the bytea type is returned as []byte
All other types are returned directly from the backend as []byte values in text format.
Errors
pq may return errors of type *pq.Error which can be interrogated for error details:
if err, ok := err.(*pq.Error); ok {
fmt.Println("pq error:", err.Code.Name())
}
See the pq.Error type for details.
Bulk imports
You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
handle can then be repeatedly "executed" to copy data into the target table.
After all data has been processed you should call Exec() once with no arguments
to flush all buffered data. Any call to Exec() might return an error which
should be handled appropriately, but because of the internal buffering an error
returned by Exec() might not be related to the data passed in the call that
failed.
CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
explicit transaction in pq.
Usage example:
txn, err := db.Begin()
if err != nil {
log.Fatal(err)
}
stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
if err != nil {
log.Fatal(err)
}
for _, user := range users {
_, err = stmt.Exec(user.Name, int64(user.Age))
if err != nil {
log.Fatal(err)
}
}
_, err = stmt.Exec()
if err != nil {
log.Fatal(err)
}
err = stmt.Close()
if err != nil {
log.Fatal(err)
}
err = txn.Commit()
if err != nil {
log.Fatal(err)
}
Notifications
PostgreSQL supports a simple publish/subscribe model over database
connections. See http://www.postgresql.org/docs/current/static/sql-notify.html
for more information about the general mechanism.
To start listening for notifications, you first have to open a new connection
to the database by calling NewListener. This connection cannot be used for
anything other than LISTEN / NOTIFY. Calling Listen will open a "notification
channel"; once a notification channel is open, a notification generated on that
channel will effect a send on the Listener.Notify channel. A notification
channel will remain open until Unlisten is called, though connection loss might
result in some notifications being lost. To solve this problem, Listener sends
a nil pointer over the Notify channel any time the connection is re-established
following a connection loss. The application can get information about the
state of the underlying connection by setting an event callback in the call to
NewListener.
A single Listener can safely be used from concurrent goroutines, which means
that there is often no need to create more than one Listener in your
application. However, a Listener is always connected to a single database, so
you will need to create a new Listener instance for every database you want to
receive notifications in.
The channel name in both Listen and Unlisten is case sensitive, and can contain
any characters legal in an identifier (see
http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
for more information). Note that the channel name will be truncated to 63
bytes by the PostgreSQL server.
You can find a complete, working example of Listener usage at
http://godoc.org/github.com/lib/pq/example/listen.
*/
package pq
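
A minimal Listener sketch following the description above; the channel name, reconnect intervals, and connection string are illustrative. A nil notification marks a re-established connection:

package example

import (
	"fmt"
	"time"

	"github.com/lib/pq"
)

// listenJobs blocks, printing payloads sent with: NOTIFY jobs, 'payload'.
func listenJobs(connStr string) error {
	l := pq.NewListener(connStr, 10*time.Second, time.Minute, nil)
	if err := l.Listen("jobs"); err != nil {
		return err
	}
	for n := range l.Notify {
		if n == nil {
			continue // reconnected; notifications may have been missed
		}
		fmt.Println("notify on", n.Channel, "payload:", n.Extra)
	}
	return nil
}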

603
vendor/github.com/lib/pq/encode.go generated vendored Normal file
View File

@ -0,0 +1,603 @@
package pq
import (
"bytes"
"database/sql/driver"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"strconv"
"strings"
"sync"
"time"
"github.com/lib/pq/oid"
)
func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
switch v := x.(type) {
case []byte:
return v
default:
return encode(parameterStatus, x, oid.T_unknown)
}
}
func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(nil, v, 10)
case float64:
return strconv.AppendFloat(nil, v, 'f', -1, 64)
case []byte:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, v)
}
return v
case string:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, []byte(v))
}
return []byte(v)
case bool:
return strconv.AppendBool(nil, v)
case time.Time:
return formatTs(v)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
switch f {
case formatBinary:
return binaryDecode(parameterStatus, s, typ)
case formatText:
return textDecode(parameterStatus, s, typ)
default:
panic("not reached")
}
}
func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_bytea:
return s
case oid.T_int8:
return int64(binary.BigEndian.Uint64(s))
case oid.T_int4:
return int64(int32(binary.BigEndian.Uint32(s)))
case oid.T_int2:
return int64(int16(binary.BigEndian.Uint16(s)))
case oid.T_uuid:
b, err := decodeUUIDBinary(s)
if err != nil {
panic(err)
}
return b
default:
errorf("don't know how to decode binary parameter of type %d", uint32(typ))
}
panic("not reached")
}
func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_char, oid.T_varchar, oid.T_text:
return string(s)
case oid.T_bytea:
b, err := parseBytea(s)
if err != nil {
errorf("%s", err)
}
return b
case oid.T_timestamptz:
return parseTs(parameterStatus.currentLocation, string(s))
case oid.T_timestamp, oid.T_date:
return parseTs(nil, string(s))
case oid.T_time:
return mustParse("15:04:05", typ, s)
case oid.T_timetz:
return mustParse("15:04:05-07", typ, s)
case oid.T_bool:
return s[0] == 't'
case oid.T_int8, oid.T_int4, oid.T_int2:
i, err := strconv.ParseInt(string(s), 10, 64)
if err != nil {
errorf("%s", err)
}
return i
case oid.T_float4, oid.T_float8:
bits := 64
if typ == oid.T_float4 {
bits = 32
}
f, err := strconv.ParseFloat(string(s), bits)
if err != nil {
errorf("%s", err)
}
return f
}
return s
}
// appendEncodedText encodes item in text format as required by COPY
// and appends to buf
func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(buf, v, 10)
case float64:
return strconv.AppendFloat(buf, v, 'f', -1, 64)
case []byte:
encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
return appendEscapedText(buf, string(encodedBytea))
case string:
return appendEscapedText(buf, v)
case bool:
return strconv.AppendBool(buf, v)
case time.Time:
return append(buf, formatTs(v)...)
case nil:
return append(buf, "\\N"...)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
func appendEscapedText(buf []byte, text string) []byte {
escapeNeeded := false
startPos := 0
var c byte
// check if we need to escape
for i := 0; i < len(text); i++ {
c = text[i]
if c == '\\' || c == '\n' || c == '\r' || c == '\t' {
escapeNeeded = true
startPos = i
break
}
}
if !escapeNeeded {
return append(buf, text...)
}
// copy till first char to escape, iterate the rest
result := append(buf, text[:startPos]...)
for i := startPos; i < len(text); i++ {
c = text[i]
switch c {
case '\\':
result = append(result, '\\', '\\')
case '\n':
result = append(result, '\\', 'n')
case '\r':
result = append(result, '\\', 'r')
case '\t':
result = append(result, '\\', 't')
default:
result = append(result, c)
}
}
return result
}
func mustParse(f string, typ oid.Oid, s []byte) time.Time {
str := string(s)
// check for a 30-minute-offset timezone
if (typ == oid.T_timestamptz || typ == oid.T_timetz) &&
str[len(str)-3] == ':' {
f += ":00"
}
t, err := time.Parse(f, str)
if err != nil {
errorf("decode: %s", err)
}
return t
}
var errInvalidTimestamp = errors.New("invalid timestamp")
type timestampParser struct {
err error
}
func (p *timestampParser) expect(str string, char byte, pos int) {
if p.err != nil {
return
}
if pos+1 > len(str) {
p.err = errInvalidTimestamp
return
}
if c := str[pos]; c != char && p.err == nil {
p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
}
}
func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
if p.err != nil {
return 0
}
if begin < 0 || end < 0 || begin > end || end > len(str) {
p.err = errInvalidTimestamp
return 0
}
result, err := strconv.Atoi(str[begin:end])
if err != nil {
if p.err == nil {
p.err = fmt.Errorf("expected number; got '%v'", str)
}
return 0
}
return result
}
// The location cache caches the time zones typically used by the client.
type locationCache struct {
cache map[int]*time.Location
lock sync.Mutex
}
// All connections share the same list of timezones. Benchmarking shows that
// about 5% speed could be gained by putting the cache in the connection and
// losing the mutex, at the cost of a small amount of memory and a somewhat
// significant increase in code complexity.
var globalLocationCache = newLocationCache()
func newLocationCache() *locationCache {
return &locationCache{cache: make(map[int]*time.Location)}
}
// Returns the cached timezone for the specified offset, creating and caching
// it if necessary.
func (c *locationCache) getLocation(offset int) *time.Location {
c.lock.Lock()
defer c.lock.Unlock()
location, ok := c.cache[offset]
if !ok {
location = time.FixedZone("", offset)
c.cache[offset] = location
}
return location
}
var infinityTsEnabled = false
var infinityTsNegative time.Time
var infinityTsPositive time.Time
const (
infinityTsEnabledAlready = "pq: infinity timestamp enabled already"
infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
)
// EnableInfinityTs controls the handling of Postgres' "-infinity" and
// "infinity" "timestamp"s.
//
// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
// []byte("-infinity") and []byte("infinity") respectively, and potentially
// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
//
// Once EnableInfinityTs has been called, all connections created using this
// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
// "timestamp with time zone" and "date" types to the predefined minimum and
// maximum times, respectively. When encoding time.Time values, any time which
// equals or precedes the predefined minimum time will be encoded to
// "-infinity". Any values at or past the maximum time will similarly be
// encoded to "infinity".
//
// If EnableInfinityTs is called with negative >= positive, it will panic.
// Calling EnableInfinityTs after a connection has been established results in
// undefined behavior. If EnableInfinityTs is called more than once, it will
// panic.
func EnableInfinityTs(negative time.Time, positive time.Time) {
if infinityTsEnabled {
panic(infinityTsEnabledAlready)
}
if !negative.Before(positive) {
panic(infinityTsNegativeMustBeSmaller)
}
infinityTsEnabled = true
infinityTsNegative = negative
infinityTsPositive = positive
}
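
A sketch of enabling infinity handling at program startup; the sentinel values are arbitrary, and per the rules above the call must precede any connection and negative must come before positive:

package main

import (
	"time"

	"github.com/lib/pq"
)

func main() {
	pq.EnableInfinityTs(
		time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), // decoded from "-infinity"
		time.Date(2300, 1, 1, 0, 0, 0, 0, time.UTC), // decoded from "infinity"
	)
}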
/*
* Testing might want to toggle infinityTsEnabled
*/
func disableInfinityTs() {
infinityTsEnabled = false
}
// This is a time function specific to the Postgres default DateStyle
// setting ("ISO, MDY"), the only one we currently support. This
// accounts for the discrepancies between the parsing available with
// time.Parse and the Postgres date formatting quirks.
func parseTs(currentLocation *time.Location, str string) interface{} {
switch str {
case "-infinity":
if infinityTsEnabled {
return infinityTsNegative
}
return []byte(str)
case "infinity":
if infinityTsEnabled {
return infinityTsPositive
}
return []byte(str)
}
t, err := ParseTimestamp(currentLocation, str)
if err != nil {
panic(err)
}
return t
}
// ParseTimestamp parses Postgres' text format. It returns a time.Time in
// currentLocation iff that time's offset agrees with the offset sent from the
// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
// fixed offset provided by the Postgres server.
func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
p := timestampParser{}
monSep := strings.IndexRune(str, '-')
// this is Gregorian year, not ISO Year
// In Gregorian system, the year 1 BC is followed by AD 1
year := p.mustAtoi(str, 0, monSep)
daySep := monSep + 3
month := p.mustAtoi(str, monSep+1, daySep)
p.expect(str, '-', daySep)
timeSep := daySep + 3
day := p.mustAtoi(str, daySep+1, timeSep)
minLen := monSep + len("01-01") + 1
isBC := strings.HasSuffix(str, " BC")
if isBC {
minLen += 3
}
var hour, minute, second int
if len(str) > minLen {
p.expect(str, ' ', timeSep)
minSep := timeSep + 3
p.expect(str, ':', minSep)
hour = p.mustAtoi(str, timeSep+1, minSep)
secSep := minSep + 3
p.expect(str, ':', secSep)
minute = p.mustAtoi(str, minSep+1, secSep)
secEnd := secSep + 3
second = p.mustAtoi(str, secSep+1, secEnd)
}
remainderIdx := monSep + len("01-01 00:00:00") + 1
// Three optional (but ordered) sections follow: the
// fractional seconds, the time zone offset, and the BC
// designation. We set them up here and adjust the other
// offsets if the preceding sections exist.
nanoSec := 0
tzOff := 0
if remainderIdx < len(str) && str[remainderIdx] == '.' {
fracStart := remainderIdx + 1
fracOff := strings.IndexAny(str[fracStart:], "-+ ")
if fracOff < 0 {
fracOff = len(str) - fracStart
}
fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
remainderIdx += fracOff + 1
}
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
// time zone separator is always '-' or '+' (UTC is +00)
var tzSign int
switch c := str[tzStart]; c {
case '-':
tzSign = -1
case '+':
tzSign = +1
default:
return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
}
tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
remainderIdx += 3
var tzMin, tzSec int
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
}
var isoYear int
if isBC {
isoYear = 1 - year
remainderIdx += 3
} else {
isoYear = year
}
if remainderIdx < len(str) {
return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
}
t := time.Date(isoYear, time.Month(month), day,
hour, minute, second, nanoSec,
globalLocationCache.getLocation(tzOff))
if currentLocation != nil {
// Set the location of the returned Time based on the session's
// TimeZone value, but only if the local time zone database agrees with
// the remote database on the offset.
lt := t.In(currentLocation)
_, newOff := lt.Zone()
if newOff == tzOff {
t = lt
}
}
return t, p.err
}
// formatTs formats t into a format postgres understands.
func formatTs(t time.Time) []byte {
if infinityTsEnabled {
// t <= -infinity : ! (t > -infinity)
if !t.After(infinityTsNegative) {
return []byte("-infinity")
}
// t >= infinity : ! (!t < infinity)
if !t.Before(infinityTsPositive) {
return []byte("infinity")
}
}
return FormatTimestamp(t)
}
// FormatTimestamp formats t into Postgres' text format for timestamps.
func FormatTimestamp(t time.Time) []byte {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
bc := false
if t.Year() <= 0 {
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
_, offset := t.Zone()
offset = offset % 60
if offset != 0 {
// RFC3339Nano already printed the minus sign
if offset < 0 {
offset = -offset
}
b = append(b, ':')
if offset < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(offset), 10)
}
if bc {
b = append(b, " BC"...)
}
return b
}
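
The two exported helpers round-trip Postgres' text format; a small sketch with an illustrative timestamp (a nil location keeps the server-supplied fixed offset):

package main

import (
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	t, err := pq.ParseTimestamp(nil, "2001-02-03 04:05:06.007-08")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(pq.FormatTimestamp(t))) // same instant, Postgres text format
}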
// Parse a bytea value received from the server. Both "hex" and the legacy
// "escape" format are supported.
func parseBytea(s []byte) (result []byte, err error) {
if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
// bytea_output = hex
s = s[2:] // trim off leading "\\x"
result = make([]byte, hex.DecodedLen(len(s)))
_, err := hex.Decode(result, s)
if err != nil {
return nil, err
}
} else {
// bytea_output = escape
for len(s) > 0 {
if s[0] == '\\' {
// escaped '\\'
if len(s) >= 2 && s[1] == '\\' {
result = append(result, '\\')
s = s[2:]
continue
}
// '\\' followed by an octal number
if len(s) < 4 {
return nil, fmt.Errorf("invalid bytea sequence %v", s)
}
r, err := strconv.ParseInt(string(s[1:4]), 8, 9)
if err != nil {
return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
}
result = append(result, byte(r))
s = s[4:]
} else {
// We hit an unescaped, raw byte. Try to read in as many as
// possible in one go.
i := bytes.IndexByte(s, '\\')
if i == -1 {
result = append(result, s...)
break
}
result = append(result, s[:i]...)
s = s[i:]
}
}
}
return result, nil
}
func encodeBytea(serverVersion int, v []byte) (result []byte) {
if serverVersion >= 90000 {
// Use the hex format if we know that the server supports it
result = make([]byte, 2+hex.EncodedLen(len(v)))
result[0] = '\\'
result[1] = 'x'
hex.Encode(result[2:], v)
} else {
// .. or resort to "escape"
for _, b := range v {
if b == '\\' {
result = append(result, '\\', '\\')
} else if b < 0x20 || b > 0x7e {
result = append(result, []byte(fmt.Sprintf("\\%03o", b))...)
} else {
result = append(result, b)
}
}
}
return result
}
// NullTime represents a time.Time that may be null. NullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString.
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
nt.Time, nt.Valid = value.(time.Time)
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
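
A minimal sketch of NullTime as a scan destination, assuming a hypothetical users table with a nullable last_login column; NULL surfaces as Valid == false rather than a scan error:

package example

import (
	"database/sql"
	"time"

	"github.com/lib/pq"
)

func lastLogin(db *sql.DB, id int64) (*time.Time, error) {
	var nt pq.NullTime
	err := db.QueryRow(`SELECT last_login FROM users WHERE id = $1`, id).Scan(&nt)
	if err != nil || !nt.Valid {
		return nil, err
	}
	return &nt.Time, nil
}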

Some files were not shown because too many files have changed in this diff.