chore: bump go-waku version

Richard Ramos 2023-03-02 14:25:30 -04:00 committed by RichΛrd
parent 48078393f9
commit b8d5455904
159 changed files with 5902 additions and 3045 deletions


@ -1 +1 @@
0.136.3 0.137.0

go.mod (16 changed lines)

@ -80,7 +80,7 @@ require (
github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-log/v2 v2.5.1
github.com/ladydascalie/currency v1.6.0 github.com/ladydascalie/currency v1.6.0
github.com/meirf/gopart v0.0.0-20180520194036-37e9492a85a8 github.com/meirf/gopart v0.0.0-20180520194036-37e9492a85a8
github.com/waku-org/go-waku v0.5.2-0.20230224151428-d6c87f346b72 github.com/waku-org/go-waku v0.5.2-0.20230302181640-4c385249f567
github.com/yeqown/go-qrcode/v2 v2.2.1 github.com/yeqown/go-qrcode/v2 v2.2.1
github.com/yeqown/go-qrcode/writer/standard v1.2.1 github.com/yeqown/go-qrcode/writer/standard v1.2.1
) )
@ -132,7 +132,7 @@ require (
github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-ole/go-ole v1.2.5 // indirect
github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-stack/stack v1.8.0 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
@ -157,7 +157,7 @@ require (
github.com/huin/goupnp v1.0.3 // indirect github.com/huin/goupnp v1.0.3 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.15.12 // indirect github.com/klauspost/compress v1.15.15 // indirect
github.com/klauspost/cpuid/v2 v2.2.1 // indirect github.com/klauspost/cpuid/v2 v2.2.1 // indirect
github.com/koron/go-ssdp v0.0.3 // indirect github.com/koron/go-ssdp v0.0.3 // indirect
github.com/leodido/go-urn v1.2.1 // indirect github.com/leodido/go-urn v1.2.1 // indirect
@ -213,8 +213,8 @@ require (
github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38 // indirect github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/tsdb v0.10.0 // indirect github.com/prometheus/tsdb v0.10.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-18 v0.2.0 // indirect github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
@ -252,14 +252,14 @@ require (
go.uber.org/dig v1.15.0 // indirect go.uber.org/dig v1.15.0 // indirect
go.uber.org/fx v1.18.2 // indirect go.uber.org/fx v1.18.2 // indirect
go.uber.org/multierr v1.8.0 // indirect go.uber.org/multierr v1.8.0 // indirect
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/mod v0.7.0 // indirect golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.4.0 // indirect golang.org/x/net v0.4.0 // indirect
golang.org/x/sync v0.1.0 // indirect golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.3.0 // indirect golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.3.0 // indirect golang.org/x/term v0.3.0 // indirect
golang.org/x/text v0.7.0 // indirect golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.3.0 // indirect golang.org/x/tools v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

go.sum (38 changed lines)

@ -765,8 +765,8 @@ github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
@ -808,13 +808,11 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
@ -855,8 +853,9 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@ -1290,8 +1289,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
@ -1794,7 +1793,6 @@ github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@ -1820,9 +1818,8 @@ github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16
github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@ -1840,8 +1837,8 @@ github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.2/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.2/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
@ -2103,8 +2100,8 @@ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 h1:xwY0kW5XZFimdqfZb9cZwT1S3VJP9j3AE6bdNd9boXM= github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 h1:xwY0kW5XZFimdqfZb9cZwT1S3VJP9j3AE6bdNd9boXM=
github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw= github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw=
github.com/waku-org/go-waku v0.5.2-0.20230224151428-d6c87f346b72 h1:0W0A9n895veIh0tFw2TmgW4PmI0d84WM6eriP6K80Dg= github.com/waku-org/go-waku v0.5.2-0.20230302181640-4c385249f567 h1:8r7Y1hOmtcPaKKSDT0RaJCNG4HWDZ11a4/tyfbPE260=
github.com/waku-org/go-waku v0.5.2-0.20230224151428-d6c87f346b72/go.mod h1:0e0itkseairW1Uz9thxhb0OPRuPfC6qagJrOgZAV9PA= github.com/waku-org/go-waku v0.5.2-0.20230302181640-4c385249f567/go.mod h1:Uz6WhNbCtbM8fSr0wb8apqhAPQYKvOPoyaGOHdw9DkU=
github.com/waku-org/go-zerokit-rln v0.1.7-wakuorg h1:2vVIBCtBih2w1K9ll8YnToTDZvbxcgbsClsPlJS/kkg= github.com/waku-org/go-zerokit-rln v0.1.7-wakuorg h1:2vVIBCtBih2w1K9ll8YnToTDZvbxcgbsClsPlJS/kkg=
github.com/waku-org/go-zerokit-rln v0.1.7-wakuorg/go.mod h1:GlyaVeEWNEBxVJrWC6jFTvb4LNb9d9qnjdS6EiWVUvk= github.com/waku-org/go-zerokit-rln v0.1.7-wakuorg/go.mod h1:GlyaVeEWNEBxVJrWC6jFTvb4LNb9d9qnjdS6EiWVUvk=
github.com/wealdtech/go-ens/v3 v3.5.0 h1:Huc9GxBgiGweCOGTYomvsg07K2QggAqZpZ5SuiZdC8o= github.com/wealdtech/go-ens/v3 v3.5.0 h1:Huc9GxBgiGweCOGTYomvsg07K2QggAqZpZ5SuiZdC8o=
@ -2300,8 +2297,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db h1:D/cFflL63o2KSLJIwjlcIt8PR064j/xsmdEJL/YvY/o= golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@ -2452,7 +2449,6 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -2620,14 +2616,13 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@ -2655,8 +2650,9 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs=
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=


@ -1,15 +0,0 @@
language: go
sudo: false
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- tip
before_install:
- go get github.com/mattn/goveralls
script:
- goveralls -service=travis-ci


@ -3,7 +3,7 @@
before: before:
hooks: hooks:
- ./gen.sh - ./gen.sh
- go install mvdan.cc/garble@latest - go install mvdan.cc/garble@v0.7.2
builds: builds:
- -


@ -9,7 +9,6 @@ This package provides various compression algorithms.
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
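The gzhttp wrapper listed above has a small surface; as a hedged illustration (plain net/http server, handler and port are made up for the example, nothing here is specific to this commit):

```go
package main

import (
	"log"
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello, compressed world"))
	})
	// GzipHandler wraps any http.Handler and gzips responses for clients
	// that send Accept-Encoding: gzip, leaving other clients untouched.
	http.Handle("/", gzhttp.GzipHandler(hello))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```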
@ -17,6 +16,22 @@ This package provides various compression algorithms.
# changelog # changelog
* Jan 3rd, 2023 (v1.15.14)
* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
* zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
* export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
* s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723
* Dec 11, 2022 (v1.15.13)
* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
* Oct 26, 2022 (v1.15.12)
* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
* Sept 26, 2022 (v1.15.11) * Sept 26, 2022 (v1.15.11)
* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
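The MaxEncodedSize addition mentioned above for v1.15.13 lets callers pre-size output buffers. A minimal sketch, assuming the package's standard Encoder/EncodeAll API (input size below is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil) // nil writer: EncodeAll-only usage
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := make([]byte, 1<<20) // 1 MiB of (here, all-zero) input
	// MaxEncodedSize returns an upper bound on the compressed size,
	// so the destination can be allocated once up front.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
	dst = enc.EncodeAll(src, dst)
	fmt.Println("compressed", len(src), "bytes into", len(dst))
}
```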


@ -294,7 +294,6 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of
} }
offset = 0 offset = 0
cGain := 0
if d.chain < 100 { if d.chain < 100 {
for i := prevHead; tries > 0; tries-- { for i := prevHead; tries > 0; tries-- {
if wEnd == win[i+length] { if wEnd == win[i+length] {
@ -322,10 +321,14 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of
return return
} }
// Minimum gain to accept a match.
cGain := 4
// Some like it higher (CSV), some like it lower (JSON) // Some like it higher (CSV), some like it lower (JSON)
const baseCost = 6 const baseCost = 3
// Base is 4 bytes at with an additional cost. // Base is 4 bytes at with an additional cost.
// Matches must be better than this. // Matches must be better than this.
for i := prevHead; tries > 0; tries-- { for i := prevHead; tries > 0; tries-- {
if wEnd == win[i+length] { if wEnd == win[i+length] {
n := matchLen(win[i:i+minMatchLook], wPos) n := matchLen(win[i:i+minMatchLook], wPos)
@ -333,7 +336,7 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of
// Calculate gain. Estimate // Calculate gain. Estimate
newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
//fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n])) //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
if newGain > cGain { if newGain > cGain {
length = n length = n
offset = pos - i offset = pos - i
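To make the tuned thresholds concrete, a worked example with hypothetical bit costs (the numbers are illustrative, not taken from the real offset/length tables): if a candidate match's literals would cost 40 bits emitted raw, and its offset and length codes need 4 and 1 extra bits, then newGain = 40 - 4 - 3 - 1 = 32 with the new baseCost of 3, which comfortably clears the starting cGain of 4, so the match becomes the current best. A marginal match whose literals cost only 10 bits raw would score 10 - 4 - 3 - 1 = 2 and be rejected.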
@ -490,27 +493,103 @@ func (d *compressor) deflateLazy() {
} }
if prevLength >= minMatchLength && s.length <= prevLength { if prevLength >= minMatchLength && s.length <= prevLength {
// Check for better match at end... // No better match, but check for better match at end...
// //
// checkOff must be >=2 since we otherwise risk checking s.index // Skip forward a number of bytes.
// Offset of 2 seems to yield best results. // Offset of 2 seems to yield best results. 3 is sometimes better.
const checkOff = 2 const checkOff = 2
prevIndex := s.index - 1
if prevIndex+prevLength+checkOff < s.maxInsertIndex { // Check all, except full length
end := lookahead if prevLength < maxMatchLength-checkOff {
if lookahead > maxMatchLength { prevIndex := s.index - 1
end = maxMatchLength if prevIndex+prevLength < s.maxInsertIndex {
} end := lookahead
end += prevIndex if lookahead > maxMatchLength+checkOff {
idx := prevIndex + prevLength - (4 - checkOff) end = maxMatchLength + checkOff
h := hash4(d.window[idx:]) }
ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + (4 - checkOff) end += prevIndex
if ch2 > minIndex {
length := matchLen(d.window[prevIndex:end], d.window[ch2:]) // Hash at match end.
// It seems like a pure length metric is best. h := hash4(d.window[prevIndex+prevLength:])
if length > prevLength { ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
prevLength = length if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
prevOffset = prevIndex - ch2 length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
// It seems like a pure length metric is best.
if length > prevLength {
prevLength = length
prevOffset = prevIndex - ch2
// Extend back...
for i := checkOff - 1; i >= 0; i-- {
if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
// Emit tokens we "owe"
for j := 0; j <= i; j++ {
d.tokens.AddLiteral(d.window[prevIndex+j])
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.Reset()
}
s.index++
if s.index < s.maxInsertIndex {
h := hash4(d.window[s.index:])
ch := s.hashHead[h]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
s.hashHead[h] = uint32(s.index + s.hashOffset)
}
}
break
} else {
prevLength++
}
}
} else if false {
// Check one further ahead.
// Only rarely better, disabled for now.
prevIndex++
h := hash4(d.window[prevIndex+prevLength:])
ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
// It seems like a pure length metric is best.
if length > prevLength+checkOff {
prevLength = length
prevOffset = prevIndex - ch2
prevIndex--
// Extend back...
for i := checkOff; i >= 0; i-- {
if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
// Emit tokens we "owe"
for j := 0; j <= i; j++ {
d.tokens.AddLiteral(d.window[prevIndex+j])
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.Reset()
}
s.index++
if s.index < s.maxInsertIndex {
h := hash4(d.window[s.index:])
ch := s.hashHead[h]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
s.hashHead[h] = uint32(s.index + s.hashOffset)
}
}
break
} else {
prevLength++
}
}
}
}
}
} }
} }
} }
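The lazy matcher patched above sits behind this package's higher compression levels; a hedged caller-side sketch using the standard flate writer API (level choice and sample data are arbitrary, and nothing below is specific to this change):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	sample := bytes.Repeat([]byte("abcabcabdabcabcabd"), 512)

	var buf bytes.Buffer
	w, err := flate.NewWriter(&buf, flate.BestCompression) // a level that exercises lazy matching
	if err != nil {
		panic(err)
	}
	if _, err := w.Write(sample); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println("in:", len(sample), "out:", buf.Len())
}
```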


@ -86,11 +86,19 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
dict = dict[len(dict)-maxStatelessDict:] dict = dict[len(dict)-maxStatelessDict:]
} }
// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
var inDict []byte
for len(in) > 0 { for len(in) > 0 {
todo := in todo := in
if len(todo) > maxStatelessBlock-len(dict) { if len(inDict) > 0 {
if len(todo) > maxStatelessBlock-maxStatelessDict {
todo = todo[:maxStatelessBlock-maxStatelessDict]
}
} else if len(todo) > maxStatelessBlock-len(dict) {
todo = todo[:maxStatelessBlock-len(dict)] todo = todo[:maxStatelessBlock-len(dict)]
} }
inOrg := in
in = in[len(todo):] in = in[len(todo):]
uncompressed := todo uncompressed := todo
if len(dict) > 0 { if len(dict) > 0 {
@ -102,7 +110,11 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
todo = combined todo = combined
} }
// Compress // Compress
statelessEnc(&dst, todo, int16(len(dict))) if len(inDict) == 0 {
statelessEnc(&dst, todo, int16(len(dict)))
} else {
statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
}
isEof := eof && len(in) == 0 isEof := eof && len(in) == 0
if dst.n == 0 { if dst.n == 0 {
@ -119,7 +131,8 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
} }
if len(in) > 0 { if len(in) > 0 {
// Retain a dict if we have more // Retain a dict if we have more
dict = todo[len(todo)-maxStatelessDict:] inDict = inOrg[len(uncompressed)-maxStatelessDict:]
dict = nil
dst.Reset() dst.Reset()
} }
if bw.err != nil { if bw.err != nil {
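The hunk above changes how follow-up blocks reuse the tail of the previous input as a dictionary (a shallow slice of the original input instead of a copy). A hedged usage sketch of the public entry point, with made-up dictionary and payload sized to span more than one stateless block:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	dict := []byte("waku/2/default-waku/proto")                 // shared prefix both sides know
	payload := bytes.Repeat([]byte("waku message body "), 2048) // large enough for multiple blocks

	var buf bytes.Buffer
	// StatelessDeflate keeps no state between calls; the patched loop above
	// re-slices the original input as the dictionary for follow-up blocks
	// instead of copying it.
	if err := flate.StatelessDeflate(&buf, payload, true, dict); err != nil {
		panic(err)
	}
	fmt.Println("in:", len(payload), "out:", buf.Len())
}
```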


@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error {
c1.encodeZero(tt[src[ip-2]]) c1.encodeZero(tt[src[ip-2]])
ip -= 2 ip -= 2
} }
src = src[:ip]
// Main compression loop. // Main compression loop.
switch { switch {
case !s.zeroBits && s.actualTableLog <= 8: case !s.zeroBits && s.actualTableLog <= 8:
// We can encode 4 symbols without requiring a flush. // We can encode 4 symbols without requiring a flush.
// We do not need to check if any output is 0 bits. // We do not need to check if any output is 0 bits.
for ip >= 4 { for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32() s.bw.flush32()
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encode(tt[v0]) c2.encode(tt[v0])
c1.encode(tt[v1]) c1.encode(tt[v1])
c2.encode(tt[v2]) c2.encode(tt[v2])
c1.encode(tt[v3]) c1.encode(tt[v3])
ip -= 4
} }
case !s.zeroBits: case !s.zeroBits:
// We do not need to check if any output is 0 bits. // We do not need to check if any output is 0 bits.
for ip >= 4 { for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32() s.bw.flush32()
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encode(tt[v0]) c2.encode(tt[v0])
c1.encode(tt[v1]) c1.encode(tt[v1])
s.bw.flush32() s.bw.flush32()
c2.encode(tt[v2]) c2.encode(tt[v2])
c1.encode(tt[v3]) c1.encode(tt[v3])
ip -= 4
} }
case s.actualTableLog <= 8: case s.actualTableLog <= 8:
// We can encode 4 symbols without requiring a flush // We can encode 4 symbols without requiring a flush
for ip >= 4 { for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32() s.bw.flush32()
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encodeZero(tt[v0]) c2.encodeZero(tt[v0])
c1.encodeZero(tt[v1]) c1.encodeZero(tt[v1])
c2.encodeZero(tt[v2]) c2.encodeZero(tt[v2])
c1.encodeZero(tt[v3]) c1.encodeZero(tt[v3])
ip -= 4
} }
default: default:
for ip >= 4 { for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32() s.bw.flush32()
v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encodeZero(tt[v0]) c2.encodeZero(tt[v0])
c1.encodeZero(tt[v1]) c1.encodeZero(tt[v1])
s.bw.flush32() s.bw.flush32()
c2.encodeZero(tt[v2]) c2.encodeZero(tt[v2])
c1.encodeZero(tt[v3]) c1.encodeZero(tt[v3])
ip -= 4
} }
} }
@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) {
for _, v := range in { for _, v := range in {
s.count[v]++ s.count[v]++
} }
m := uint32(0) m, symlen := uint32(0), s.symbolLen
for i, v := range s.count[:] { for i, v := range s.count[:] {
if v == 0 {
continue
}
if v > m { if v > m {
m = v m = v
} }
if v > 0 { symlen = uint16(i) + 1
s.symbolLen = uint16(i) + 1
}
} }
s.symbolLen = symlen
return int(m) return int(m)
} }
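The compression loop above is rewritten from index arithmetic (ip -= 4) to re-slicing src from the end. The diff does not state the motivation, but re-slicing like this is a common way to let the Go compiler prove the fixed-offset accesses in bounds; a standalone sketch of the pattern (function and callback names are made up):

```go
package main

import "fmt"

// process4FromEnd walks src four bytes at a time from the end by
// re-slicing, mirroring the loop shape in the hunk above; each
// src[len(src)-k] access has an easily provable bound.
func process4FromEnd(src []byte, emit func(b byte)) {
	for ; len(src) >= 4; src = src[:len(src)-4] {
		v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
		emit(v0)
		emit(v1)
		emit(v2)
		emit(v3)
	}
	// Any 0-3 leftover bytes stay with the caller, as in the original loop.
}

func main() {
	var out []byte
	process4FromEnd([]byte("abcdefgh"), func(b byte) { out = append(out, b) })
	fmt.Println(string(out)) // "hgfedcba": groups of four, consumed from the end
}
```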


@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() {
// 2 bounds checks. // 2 bounds checks.
v := b.in[b.off-4 : b.off] v := b.in[b.off-4 : b.off]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << (b.bitsRead - 32) b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32 b.bitsRead -= 32
@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() {
return return
} }
if b.off > 4 { if b.off > 4 {
v := b.in[b.off-4:] v := b.in[b.off-4 : b.off]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << (b.bitsRead - 32) b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32 b.bitsRead -= 32
@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() {
// 2 bounds checks. // 2 bounds checks.
v := b.in[b.off-4 : b.off] v := b.in[b.off-4 : b.off]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32 b.bitsRead -= 32
@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() {
return return
} }
if b.off > 4 { if b.off > 4 {
v := b.in[b.off-4:] v := b.in[b.off-4 : b.off]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32 b.bitsRead -= 32


@ -365,29 +365,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
m := uint32(0) m := uint32(0)
if len(s.prevTable) > 0 { if len(s.prevTable) > 0 {
for i, v := range s.count[:] { for i, v := range s.count[:] {
if v == 0 {
continue
}
if v > m { if v > m {
m = v m = v
} }
if v > 0 { s.symbolLen = uint16(i) + 1
s.symbolLen = uint16(i) + 1 if i >= len(s.prevTable) {
if i >= len(s.prevTable) { reuse = false
reuse = false } else if s.prevTable[i].nBits == 0 {
} else { reuse = false
if s.prevTable[i].nBits == 0 {
reuse = false
}
}
} }
} }
return int(m), reuse return int(m), reuse
} }
for i, v := range s.count[:] { for i, v := range s.count[:] {
if v == 0 {
continue
}
if v > m { if v > m {
m = v m = v
} }
if v > 0 { s.symbolLen = uint16(i) + 1
s.symbolLen = uint16(i) + 1
}
} }
return int(m), false return int(m), false
} }
@ -484,34 +484,35 @@ func (s *Scratch) buildCTable() error {
// Different from reference implementation. // Different from reference implementation.
huffNode0 := s.nodes[0 : huffNodesLen+1] huffNode0 := s.nodes[0 : huffNodesLen+1]
for huffNode[nonNullRank].count == 0 { for huffNode[nonNullRank].count() == 0 {
nonNullRank-- nonNullRank--
} }
lowS := int16(nonNullRank) lowS := int16(nonNullRank)
nodeRoot := nodeNb + lowS - 1 nodeRoot := nodeNb + lowS - 1
lowN := nodeNb lowN := nodeNb
huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) huffNode[lowS].setParent(nodeNb)
huffNode[lowS-1].setParent(nodeNb)
nodeNb++ nodeNb++
lowS -= 2 lowS -= 2
for n := nodeNb; n <= nodeRoot; n++ { for n := nodeNb; n <= nodeRoot; n++ {
huffNode[n].count = 1 << 30 huffNode[n].setCount(1 << 30)
} }
// fake entry, strong barrier // fake entry, strong barrier
huffNode0[0].count = 1 << 31 huffNode0[0].setCount(1 << 31)
// create parents // create parents
for nodeNb <= nodeRoot { for nodeNb <= nodeRoot {
var n1, n2 int16 var n1, n2 int16
if huffNode0[lowS+1].count < huffNode0[lowN+1].count { if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
n1 = lowS n1 = lowS
lowS-- lowS--
} else { } else {
n1 = lowN n1 = lowN
lowN++ lowN++
} }
if huffNode0[lowS+1].count < huffNode0[lowN+1].count { if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
n2 = lowS n2 = lowS
lowS-- lowS--
} else { } else {
@ -519,18 +520,19 @@ func (s *Scratch) buildCTable() error {
lowN++ lowN++
} }
huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) huffNode0[n1+1].setParent(nodeNb)
huffNode0[n2+1].setParent(nodeNb)
nodeNb++ nodeNb++
} }
// distribute weights (unlimited tree height) // distribute weights (unlimited tree height)
huffNode[nodeRoot].nbBits = 0 huffNode[nodeRoot].setNbBits(0)
for n := nodeRoot - 1; n >= startNode; n-- { for n := nodeRoot - 1; n >= startNode; n-- {
huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
} }
for n := uint16(0); n <= nonNullRank; n++ { for n := uint16(0); n <= nonNullRank; n++ {
huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
} }
s.actualTableLog = s.setMaxHeight(int(nonNullRank)) s.actualTableLog = s.setMaxHeight(int(nonNullRank))
maxNbBits := s.actualTableLog maxNbBits := s.actualTableLog
@ -542,7 +544,7 @@ func (s *Scratch) buildCTable() error {
var nbPerRank [tableLogMax + 1]uint16 var nbPerRank [tableLogMax + 1]uint16
var valPerRank [16]uint16 var valPerRank [16]uint16
for _, v := range huffNode[:nonNullRank+1] { for _, v := range huffNode[:nonNullRank+1] {
nbPerRank[v.nbBits]++ nbPerRank[v.nbBits()]++
} }
// determine stating value per rank // determine stating value per rank
{ {
@ -557,7 +559,7 @@ func (s *Scratch) buildCTable() error {
// push nbBits per symbol, symbol order // push nbBits per symbol, symbol order
for _, v := range huffNode[:nonNullRank+1] { for _, v := range huffNode[:nonNullRank+1] {
s.cTable[v.symbol].nBits = v.nbBits s.cTable[v.symbol()].nBits = v.nbBits()
} }
// assign value within rank, symbol order // assign value within rank, symbol order
@ -603,12 +605,12 @@ func (s *Scratch) huffSort() {
pos := rank[r].current pos := rank[r].current
rank[r].current++ rank[r].current++
prev := nodes[(pos-1)&huffNodesMask] prev := nodes[(pos-1)&huffNodesMask]
for pos > rank[r].base && c > prev.count { for pos > rank[r].base && c > prev.count() {
nodes[pos&huffNodesMask] = prev nodes[pos&huffNodesMask] = prev
pos-- pos--
prev = nodes[(pos-1)&huffNodesMask] prev = nodes[(pos-1)&huffNodesMask]
} }
nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
} }
} }
@ -617,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
huffNode := s.nodes[1 : huffNodesLen+1] huffNode := s.nodes[1 : huffNodesLen+1]
//huffNode = huffNode[: huffNodesLen] //huffNode = huffNode[: huffNodesLen]
largestBits := huffNode[lastNonNull].nbBits largestBits := huffNode[lastNonNull].nbBits()
// early exit : no elt > maxNbBits // early exit : no elt > maxNbBits
if largestBits <= maxNbBits { if largestBits <= maxNbBits {
@ -627,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
baseCost := int(1) << (largestBits - maxNbBits) baseCost := int(1) << (largestBits - maxNbBits)
n := uint32(lastNonNull) n := uint32(lastNonNull)
for huffNode[n].nbBits > maxNbBits { for huffNode[n].nbBits() > maxNbBits {
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
huffNode[n].nbBits = maxNbBits huffNode[n].setNbBits(maxNbBits)
n-- n--
} }
// n stops at huffNode[n].nbBits <= maxNbBits // n stops at huffNode[n].nbBits <= maxNbBits
for huffNode[n].nbBits == maxNbBits { for huffNode[n].nbBits() == maxNbBits {
n-- n--
} }
// n end at index of smallest symbol using < maxNbBits // n end at index of smallest symbol using < maxNbBits
@ -655,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
{ {
currentNbBits := maxNbBits currentNbBits := maxNbBits
for pos := int(n); pos >= 0; pos-- { for pos := int(n); pos >= 0; pos-- {
if huffNode[pos].nbBits >= currentNbBits { if huffNode[pos].nbBits() >= currentNbBits {
continue continue
} }
currentNbBits = huffNode[pos].nbBits // < maxNbBits currentNbBits = huffNode[pos].nbBits() // < maxNbBits
rankLast[maxNbBits-currentNbBits] = uint32(pos) rankLast[maxNbBits-currentNbBits] = uint32(pos)
} }
} }
@ -675,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
if lowPos == noSymbol { if lowPos == noSymbol {
break break
} }
highTotal := huffNode[highPos].count highTotal := huffNode[highPos].count()
lowTotal := 2 * huffNode[lowPos].count lowTotal := 2 * huffNode[lowPos].count()
if highTotal <= lowTotal { if highTotal <= lowTotal {
break break
} }
@ -692,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
// this rank is no longer empty // this rank is no longer empty
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
} }
huffNode[rankLast[nBitsToDecrease]].nbBits++ huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
huffNode[rankLast[nBitsToDecrease]].nbBits())
if rankLast[nBitsToDecrease] == 0 { if rankLast[nBitsToDecrease] == 0 {
/* special case, reached largest symbol */ /* special case, reached largest symbol */
rankLast[nBitsToDecrease] = noSymbol rankLast[nBitsToDecrease] = noSymbol
} else { } else {
rankLast[nBitsToDecrease]-- rankLast[nBitsToDecrease]--
if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
} }
} }
@ -706,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
for totalCost < 0 { /* Sometimes, cost correction overshoot */ for totalCost < 0 { /* Sometimes, cost correction overshoot */
if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
for huffNode[n].nbBits == maxNbBits { for huffNode[n].nbBits() == maxNbBits {
n-- n--
} }
huffNode[n+1].nbBits-- huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
rankLast[1] = n + 1 rankLast[1] = n + 1
totalCost++ totalCost++
continue continue
} }
huffNode[rankLast[1]+1].nbBits-- huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
rankLast[1]++ rankLast[1]++
totalCost++ totalCost++
} }
@ -722,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
return maxNbBits return maxNbBits
} }
type nodeElt struct { // A nodeElt is the fields
count uint32 //
parent uint16 // count uint32
symbol byte // parent uint16
nbBits uint8 // symbol byte
// nbBits uint8
//
// in some order, all squashed into an integer so that the compiler
// always loads and stores entire nodeElts instead of separate fields.
type nodeElt uint64
func makeNodeElt(count uint32, symbol byte) nodeElt {
return nodeElt(count) | nodeElt(symbol)<<48
} }
func (e *nodeElt) count() uint32 { return uint32(*e) }
func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
func (e *nodeElt) symbol() byte { return byte(*e >> 48) }
func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) }
func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }
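The packed nodeElt introduced above trades a four-field struct for a single uint64 so the compiler moves whole elements at once. A self-contained round-trip sketch of the same layout (only a subset of the accessors, mirrored from the hunk, is exercised):

```go
package main

import "fmt"

// nodeElt mirrors the packed layout above: count in bits 0-31,
// parent in 32-47, symbol in 48-55, nbBits in 56-63.
type nodeElt uint64

func makeNodeElt(count uint32, symbol byte) nodeElt {
	return nodeElt(count) | nodeElt(symbol)<<48
}

func (e *nodeElt) count() uint32     { return uint32(*e) }
func (e *nodeElt) symbol() byte      { return byte(*e >> 48) }
func (e *nodeElt) nbBits() uint8     { return uint8(*e >> 56) }
func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }

func main() {
	n := makeNodeElt(1234, 'A')
	n.setNbBits(7)
	fmt.Println(n.count(), string(n.symbol()), n.nbBits()) // 1234 A 7
}
```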


@ -4,360 +4,349 @@
// func decompress4x_main_loop_amd64(ctx *decompress4xContext) // func decompress4x_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_main_loop_amd64(SB), $0-8 TEXT ·decompress4x_main_loop_amd64(SB), $0-8
XORQ DX, DX
// Preload values // Preload values
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVBQZX 8(AX), DI MOVBQZX 8(AX), DI
MOVQ 16(AX), SI MOVQ 16(AX), BX
MOVQ 48(AX), BX MOVQ 48(AX), SI
MOVQ 24(AX), R9 MOVQ 24(AX), R8
MOVQ 32(AX), R10 MOVQ 32(AX), R9
MOVQ (AX), R11 MOVQ (AX), R10
// Main loop // Main loop
main_loop: main_loop:
MOVQ SI, R8 XORL DX, DX
CMPQ R8, BX CMPQ BX, SI
SETGE DL SETGE DL
// br0.fillFast32() // br0.fillFast32()
MOVQ 32(R11), R12 MOVQ 32(R10), R11
MOVBQZX 40(R11), R13 MOVBQZX 40(R10), R12
CMPQ R13, $0x20 CMPQ R12, $0x20
JBE skip_fill0 JBE skip_fill0
MOVQ 24(R11), AX MOVQ 24(R10), AX
SUBQ $0x20, R13 SUBQ $0x20, R12
SUBQ $0x04, AX SUBQ $0x04, AX
MOVQ (R11), R14 MOVQ (R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63) // b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(R14*1), R14 MOVL (AX)(R13*1), R13
MOVQ R13, CX MOVQ R12, CX
SHLQ CL, R14 SHLQ CL, R13
MOVQ AX, 24(R11) MOVQ AX, 24(R10)
ORQ R14, R12 ORQ R13, R11
// exhausted = exhausted || (br0.off < 4) // exhausted += (br0.off < 4)
CMPQ AX, $0x04 CMPQ AX, $0x04
SETLT AL ADCB $+0, DL
ORB AL, DL
skip_fill0: skip_fill0:
// val0 := br0.peekTopBits(peekBits) // val0 := br0.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v0 := table[val0&mask] // v0 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br0.advance(uint8(v0.entry) // br0.advance(uint8(v0.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val1 := br0.peekTopBits(peekBits) // val1 := br0.peekTopBits(peekBits)
MOVQ DI, CX MOVQ DI, CX
MOVQ R12, R14 MOVQ R11, R13
SHRQ CL, R14 SHRQ CL, R13
// v1 := table[val1&mask] // v1 := table[val1&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br0.advance(uint8(v1.entry)) // br0.advance(uint8(v1.entry))
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// these two writes get coalesced // these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (R8) MOVW AX, (BX)
// update the bitreader structure // update the bitreader structure
MOVQ R12, 32(R11) MOVQ R11, 32(R10)
MOVB R13, 40(R11) MOVB R12, 40(R10)
ADDQ R9, R8
// br1.fillFast32() // br1.fillFast32()
MOVQ 80(R11), R12 MOVQ 80(R10), R11
MOVBQZX 88(R11), R13 MOVBQZX 88(R10), R12
CMPQ R13, $0x20 CMPQ R12, $0x20
JBE skip_fill1 JBE skip_fill1
MOVQ 72(R11), AX MOVQ 72(R10), AX
SUBQ $0x20, R13 SUBQ $0x20, R12
SUBQ $0x04, AX SUBQ $0x04, AX
MOVQ 48(R11), R14 MOVQ 48(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63) // b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(R14*1), R14 MOVL (AX)(R13*1), R13
MOVQ R13, CX MOVQ R12, CX
SHLQ CL, R14 SHLQ CL, R13
MOVQ AX, 72(R11) MOVQ AX, 72(R10)
ORQ R14, R12 ORQ R13, R11
// exhausted = exhausted || (br1.off < 4) // exhausted += (br1.off < 4)
CMPQ AX, $0x04 CMPQ AX, $0x04
SETLT AL ADCB $+0, DL
ORB AL, DL
skip_fill1: skip_fill1:
// val0 := br1.peekTopBits(peekBits) // val0 := br1.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v0 := table[val0&mask] // v0 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br1.advance(uint8(v0.entry) // br1.advance(uint8(v0.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val1 := br1.peekTopBits(peekBits) // val1 := br1.peekTopBits(peekBits)
MOVQ DI, CX MOVQ DI, CX
MOVQ R12, R14 MOVQ R11, R13
SHRQ CL, R14 SHRQ CL, R13
// v1 := table[val1&mask] // v1 := table[val1&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br1.advance(uint8(v1.entry)) // br1.advance(uint8(v1.entry))
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// these two writes get coalesced // these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (R8) MOVW AX, (BX)(R8*1)
// update the bitreader structure // update the bitreader structure
MOVQ R12, 80(R11) MOVQ R11, 80(R10)
MOVB R13, 88(R11) MOVB R12, 88(R10)
ADDQ R9, R8
// br2.fillFast32() // br2.fillFast32()
MOVQ 128(R11), R12 MOVQ 128(R10), R11
MOVBQZX 136(R11), R13 MOVBQZX 136(R10), R12
CMPQ R13, $0x20 CMPQ R12, $0x20
JBE skip_fill2 JBE skip_fill2
MOVQ 120(R11), AX MOVQ 120(R10), AX
SUBQ $0x20, R13 SUBQ $0x20, R12
SUBQ $0x04, AX SUBQ $0x04, AX
MOVQ 96(R11), R14 MOVQ 96(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63) // b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(R14*1), R14 MOVL (AX)(R13*1), R13
MOVQ R13, CX MOVQ R12, CX
SHLQ CL, R14 SHLQ CL, R13
MOVQ AX, 120(R11) MOVQ AX, 120(R10)
ORQ R14, R12 ORQ R13, R11
// exhausted = exhausted || (br2.off < 4) // exhausted += (br2.off < 4)
CMPQ AX, $0x04 CMPQ AX, $0x04
SETLT AL ADCB $+0, DL
ORB AL, DL
skip_fill2: skip_fill2:
// val0 := br2.peekTopBits(peekBits) // val0 := br2.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v0 := table[val0&mask] // v0 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br2.advance(uint8(v0.entry) // br2.advance(uint8(v0.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val1 := br2.peekTopBits(peekBits) // val1 := br2.peekTopBits(peekBits)
MOVQ DI, CX MOVQ DI, CX
MOVQ R12, R14 MOVQ R11, R13
SHRQ CL, R14 SHRQ CL, R13
// v1 := table[val1&mask] // v1 := table[val1&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br2.advance(uint8(v1.entry)) // br2.advance(uint8(v1.entry))
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// these two writes get coalesced // these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (R8) MOVW AX, (BX)(R8*2)
// update the bitreader structure // update the bitreader structure
MOVQ R12, 128(R11) MOVQ R11, 128(R10)
MOVB R13, 136(R11) MOVB R12, 136(R10)
ADDQ R9, R8
// br3.fillFast32() // br3.fillFast32()
MOVQ 176(R11), R12 MOVQ 176(R10), R11
MOVBQZX 184(R11), R13 MOVBQZX 184(R10), R12
CMPQ R13, $0x20 CMPQ R12, $0x20
JBE skip_fill3 JBE skip_fill3
MOVQ 168(R11), AX MOVQ 168(R10), AX
SUBQ $0x20, R13 SUBQ $0x20, R12
SUBQ $0x04, AX SUBQ $0x04, AX
MOVQ 144(R11), R14 MOVQ 144(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63) // b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(R14*1), R14 MOVL (AX)(R13*1), R13
MOVQ R13, CX MOVQ R12, CX
SHLQ CL, R14 SHLQ CL, R13
MOVQ AX, 168(R11) MOVQ AX, 168(R10)
ORQ R14, R12 ORQ R13, R11
// exhausted = exhausted || (br3.off < 4) // exhausted += (br3.off < 4)
CMPQ AX, $0x04 CMPQ AX, $0x04
SETLT AL ADCB $+0, DL
ORB AL, DL
skip_fill3: skip_fill3:
// val0 := br3.peekTopBits(peekBits) // val0 := br3.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v0 := table[val0&mask] // v0 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br3.advance(uint8(v0.entry) // br3.advance(uint8(v0.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val1 := br3.peekTopBits(peekBits) // val1 := br3.peekTopBits(peekBits)
MOVQ DI, CX MOVQ DI, CX
MOVQ R12, R14 MOVQ R11, R13
SHRQ CL, R14 SHRQ CL, R13
// v1 := table[val1&mask] // v1 := table[val1&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br3.advance(uint8(v1.entry)) // br3.advance(uint8(v1.entry))
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// these two writes get coalesced // these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (R8) LEAQ (R8)(R8*2), CX
MOVW AX, (BX)(CX*1)
// update the bitreader structure // update the bitreader structure
MOVQ R12, 176(R11) MOVQ R11, 176(R10)
MOVB R13, 184(R11) MOVB R12, 184(R10)
ADDQ $0x02, SI ADDQ $0x02, BX
TESTB DL, DL TESTB DL, DL
JZ main_loop JZ main_loop
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
SUBQ 16(AX), SI SUBQ 16(AX), BX
SHLQ $0x02, SI SHLQ $0x02, BX
MOVQ SI, 40(AX) MOVQ BX, 40(AX)
RET RET
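Throughout decompress4x_main_loop_amd64 above, the regenerated assembly replaces the SETLT AL / ORB AL, DL pair with a single ADCB $+0, DL, and the matching comment changes from `exhausted = exhausted || (brN.off < 4)` to `exhausted += (brN.off < 4)`: the preceding CMPQ leaves the carry flag set exactly when the remaining offset is unsigned-below 4, so add-with-carry folds the condition into DL without first materializing a byte. A small, self-contained Go sketch of that accumulator semantics (illustration only, not the generated code):

```go
package main

import "fmt"

func main() {
	// Sketch of the "exhausted += (br.off < 4)" accumulation that the rewritten
	// assembly performs with CMPQ + ADCB: every bit stream whose remaining
	// offset drops below 4 bumps the counter, and any non-zero value makes the
	// TESTB DL, DL / JZ main_loop check fall through and end the loop.
	offs := []uint64{17, 9, 3, 4} // hypothetical br0..br3 offsets after a fill
	var exhausted byte
	for _, off := range offs {
		if off < 4 { // the assembly reads this condition straight from the carry flag
			exhausted++
		}
	}
	fmt.Println("exhausted streams this iteration:", exhausted) // prints 1, so the loop ends
}
```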
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
XORQ DX, DX
// Preload values // Preload values
MOVQ ctx+0(FP), CX MOVQ ctx+0(FP), CX
MOVBQZX 8(CX), DI MOVBQZX 8(CX), DI
MOVQ 16(CX), BX MOVQ 16(CX), BX
MOVQ 48(CX), SI MOVQ 48(CX), SI
MOVQ 24(CX), R9 MOVQ 24(CX), R8
MOVQ 32(CX), R10 MOVQ 32(CX), R9
MOVQ (CX), R11 MOVQ (CX), R10
// Main loop // Main loop
main_loop: main_loop:
MOVQ BX, R8 XORL DX, DX
CMPQ R8, SI CMPQ BX, SI
SETGE DL SETGE DL
// br0.fillFast32() // br0.fillFast32()
MOVQ 32(R11), R12 MOVQ 32(R10), R11
MOVBQZX 40(R11), R13 MOVBQZX 40(R10), R12
CMPQ R13, $0x20 CMPQ R12, $0x20
JBE skip_fill0 JBE skip_fill0
MOVQ 24(R11), R14 MOVQ 24(R10), R13
SUBQ $0x20, R13 SUBQ $0x20, R12
SUBQ $0x04, R14 SUBQ $0x04, R13
MOVQ (R11), R15 MOVQ (R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63) // b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R14)(R15*1), R15 MOVL (R13)(R14*1), R14
MOVQ R13, CX MOVQ R12, CX
SHLQ CL, R15 SHLQ CL, R14
MOVQ R14, 24(R11) MOVQ R13, 24(R10)
ORQ R15, R12 ORQ R14, R11
// exhausted = exhausted || (br0.off < 4) // exhausted += (br0.off < 4)
CMPQ R14, $0x04 CMPQ R13, $0x04
SETLT AL ADCB $+0, DL
ORB AL, DL
skip_fill0: skip_fill0:
// val0 := br0.peekTopBits(peekBits) // val0 := br0.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v0 := table[val0&mask] // v0 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br0.advance(uint8(v0.entry) // br0.advance(uint8(v0.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val1 := br0.peekTopBits(peekBits) // val1 := br0.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v1 := table[val0&mask] // v1 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br0.advance(uint8(v1.entry) // br0.advance(uint8(v1.entry)
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
BSWAPL AX BSWAPL AX
// val2 := br0.peekTopBits(peekBits) // val2 := br0.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v2 := table[val0&mask] // v2 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br0.advance(uint8(v2.entry) // br0.advance(uint8(v2.entry)
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val3 := br0.peekTopBits(peekBits) // val3 := br0.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v3 := table[val0&mask] // v3 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br0.advance(uint8(v3.entry) // br0.advance(uint8(v3.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
BSWAPL AX BSWAPL AX
// these four writes get coalesced // these four writes get coalesced
@ -365,88 +354,86 @@ skip_fill0:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (R8) MOVL AX, (BX)
// update the bitreader structure // update the bitreader structure
MOVQ R12, 32(R11) MOVQ R11, 32(R10)
MOVB R13, 40(R11) MOVB R12, 40(R10)
ADDQ R9, R8
// br1.fillFast32() // br1.fillFast32()
MOVQ 80(R11), R12 MOVQ 80(R10), R11
MOVBQZX 88(R11), R13 MOVBQZX 88(R10), R12
CMPQ R13, $0x20 CMPQ R12, $0x20
JBE skip_fill1 JBE skip_fill1
MOVQ 72(R11), R14 MOVQ 72(R10), R13
SUBQ $0x20, R13 SUBQ $0x20, R12
SUBQ $0x04, R14 SUBQ $0x04, R13
MOVQ 48(R11), R15 MOVQ 48(R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63) // b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R14)(R15*1), R15 MOVL (R13)(R14*1), R14
MOVQ R13, CX MOVQ R12, CX
SHLQ CL, R15 SHLQ CL, R14
MOVQ R14, 72(R11) MOVQ R13, 72(R10)
ORQ R15, R12 ORQ R14, R11
// exhausted = exhausted || (br1.off < 4) // exhausted += (br1.off < 4)
CMPQ R14, $0x04 CMPQ R13, $0x04
SETLT AL ADCB $+0, DL
ORB AL, DL
skip_fill1: skip_fill1:
// val0 := br1.peekTopBits(peekBits) // val0 := br1.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v0 := table[val0&mask] // v0 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br1.advance(uint8(v0.entry) // br1.advance(uint8(v0.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val1 := br1.peekTopBits(peekBits) // val1 := br1.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v1 := table[val0&mask] // v1 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br1.advance(uint8(v1.entry) // br1.advance(uint8(v1.entry)
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
BSWAPL AX BSWAPL AX
// val2 := br1.peekTopBits(peekBits) // val2 := br1.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v2 := table[val0&mask] // v2 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br1.advance(uint8(v2.entry) // br1.advance(uint8(v2.entry)
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val3 := br1.peekTopBits(peekBits) // val3 := br1.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v3 := table[val0&mask] // v3 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br1.advance(uint8(v3.entry) // br1.advance(uint8(v3.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
BSWAPL AX BSWAPL AX
// these four writes get coalesced // these four writes get coalesced
@ -454,88 +441,86 @@ skip_fill1:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (R8) MOVL AX, (BX)(R8*1)
// update the bitreader structure // update the bitreader structure
MOVQ R12, 80(R11) MOVQ R11, 80(R10)
MOVB R13, 88(R11) MOVB R12, 88(R10)
ADDQ R9, R8
// br2.fillFast32() // br2.fillFast32()
MOVQ 128(R11), R12 MOVQ 128(R10), R11
MOVBQZX 136(R11), R13 MOVBQZX 136(R10), R12
CMPQ R13, $0x20 CMPQ R12, $0x20
JBE skip_fill2 JBE skip_fill2
MOVQ 120(R11), R14 MOVQ 120(R10), R13
SUBQ $0x20, R13 SUBQ $0x20, R12
SUBQ $0x04, R14 SUBQ $0x04, R13
MOVQ 96(R11), R15 MOVQ 96(R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63) // b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R14)(R15*1), R15 MOVL (R13)(R14*1), R14
MOVQ R13, CX MOVQ R12, CX
SHLQ CL, R15 SHLQ CL, R14
MOVQ R14, 120(R11) MOVQ R13, 120(R10)
ORQ R15, R12 ORQ R14, R11
// exhausted = exhausted || (br2.off < 4) // exhausted += (br2.off < 4)
CMPQ R14, $0x04 CMPQ R13, $0x04
SETLT AL ADCB $+0, DL
ORB AL, DL
skip_fill2: skip_fill2:
// val0 := br2.peekTopBits(peekBits) // val0 := br2.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v0 := table[val0&mask] // v0 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br2.advance(uint8(v0.entry) // br2.advance(uint8(v0.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val1 := br2.peekTopBits(peekBits) // val1 := br2.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v1 := table[val0&mask] // v1 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br2.advance(uint8(v1.entry) // br2.advance(uint8(v1.entry)
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
BSWAPL AX BSWAPL AX
// val2 := br2.peekTopBits(peekBits) // val2 := br2.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v2 := table[val0&mask] // v2 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br2.advance(uint8(v2.entry) // br2.advance(uint8(v2.entry)
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val3 := br2.peekTopBits(peekBits) // val3 := br2.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v3 := table[val0&mask] // v3 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br2.advance(uint8(v3.entry) // br2.advance(uint8(v3.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
BSWAPL AX BSWAPL AX
// these four writes get coalesced // these four writes get coalesced
@ -543,88 +528,86 @@ skip_fill2:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (R8) MOVL AX, (BX)(R8*2)
// update the bitreader structure // update the bitreader structure
MOVQ R12, 128(R11) MOVQ R11, 128(R10)
MOVB R13, 136(R11) MOVB R12, 136(R10)
ADDQ R9, R8
// br3.fillFast32() // br3.fillFast32()
MOVQ 176(R11), R12 MOVQ 176(R10), R11
MOVBQZX 184(R11), R13 MOVBQZX 184(R10), R12
CMPQ R13, $0x20 CMPQ R12, $0x20
JBE skip_fill3 JBE skip_fill3
MOVQ 168(R11), R14 MOVQ 168(R10), R13
SUBQ $0x20, R13 SUBQ $0x20, R12
SUBQ $0x04, R14 SUBQ $0x04, R13
MOVQ 144(R11), R15 MOVQ 144(R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63) // b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R14)(R15*1), R15 MOVL (R13)(R14*1), R14
MOVQ R13, CX MOVQ R12, CX
SHLQ CL, R15 SHLQ CL, R14
MOVQ R14, 168(R11) MOVQ R13, 168(R10)
ORQ R15, R12 ORQ R14, R11
// exhausted = exhausted || (br3.off < 4) // exhausted += (br3.off < 4)
CMPQ R14, $0x04 CMPQ R13, $0x04
SETLT AL ADCB $+0, DL
ORB AL, DL
skip_fill3: skip_fill3:
// val0 := br3.peekTopBits(peekBits) // val0 := br3.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v0 := table[val0&mask] // v0 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br3.advance(uint8(v0.entry) // br3.advance(uint8(v0.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val1 := br3.peekTopBits(peekBits) // val1 := br3.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v1 := table[val0&mask] // v1 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br3.advance(uint8(v1.entry) // br3.advance(uint8(v1.entry)
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
BSWAPL AX BSWAPL AX
// val2 := br3.peekTopBits(peekBits) // val2 := br3.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v2 := table[val0&mask] // v2 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br3.advance(uint8(v2.entry) // br3.advance(uint8(v2.entry)
MOVB CH, AH MOVB CH, AH
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
// val3 := br3.peekTopBits(peekBits) // val3 := br3.peekTopBits(peekBits)
MOVQ R12, R14 MOVQ R11, R13
MOVQ DI, CX MOVQ DI, CX
SHRQ CL, R14 SHRQ CL, R13
// v3 := table[val0&mask] // v3 := table[val0&mask]
MOVW (R10)(R14*2), CX MOVW (R9)(R13*2), CX
// br3.advance(uint8(v3.entry) // br3.advance(uint8(v3.entry)
MOVB CH, AL MOVB CH, AL
SHLQ CL, R12 SHLQ CL, R11
ADDB CL, R13 ADDB CL, R12
BSWAPL AX BSWAPL AX
// these four writes get coalesced // these four writes get coalesced
@ -632,11 +615,12 @@ skip_fill3:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (R8) LEAQ (R8)(R8*2), CX
MOVL AX, (BX)(CX*1)
// update the bitreader structure // update the bitreader structure
MOVQ R12, 176(R11) MOVQ R11, 176(R10)
MOVB R13, 184(R11) MOVB R12, 184(R10)
ADDQ $0x04, BX ADDQ $0x04, BX
TESTB DL, DL TESTB DL, DL
JZ main_loop JZ main_loop
@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8
MOVQ 16(CX), DX MOVQ 16(CX), DX
MOVQ 24(CX), BX MOVQ 24(CX), BX
CMPQ BX, $0x04 CMPQ BX, $0x04
JB error_max_decoded_size_exeeded JB error_max_decoded_size_exceeded
LEAQ (DX)(BX*1), BX LEAQ (DX)(BX*1), BX
MOVQ (CX), SI MOVQ (CX), SI
MOVQ (SI), R8 MOVQ (SI), R8
@ -667,7 +651,7 @@ main_loop:
// Check if we have room for 4 bytes in the output buffer // Check if we have room for 4 bytes in the output buffer
LEAQ 4(DX), CX LEAQ 4(DX), CX
CMPQ CX, BX CMPQ CX, BX
JGE error_max_decoded_size_exeeded JGE error_max_decoded_size_exceeded
// Decode 4 values // Decode 4 values
CMPQ R11, $0x20 CMPQ R11, $0x20
@ -744,7 +728,7 @@ loop_condition:
RET RET
// Report error // Report error
error_max_decoded_size_exeeded: error_max_decoded_size_exceeded:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ $-1, CX MOVQ $-1, CX
MOVQ CX, 40(AX) MOVQ CX, 40(AX)
@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
MOVQ 16(CX), DX MOVQ 16(CX), DX
MOVQ 24(CX), BX MOVQ 24(CX), BX
CMPQ BX, $0x04 CMPQ BX, $0x04
JB error_max_decoded_size_exeeded JB error_max_decoded_size_exceeded
LEAQ (DX)(BX*1), BX LEAQ (DX)(BX*1), BX
MOVQ (CX), SI MOVQ (CX), SI
MOVQ (SI), R8 MOVQ (SI), R8
@ -772,7 +756,7 @@ main_loop:
// Check if we have room for 4 bytes in the output buffer // Check if we have room for 4 bytes in the output buffer
LEAQ 4(DX), CX LEAQ 4(DX), CX
CMPQ CX, BX CMPQ CX, BX
JGE error_max_decoded_size_exeeded JGE error_max_decoded_size_exceeded
// Decode 4 values // Decode 4 values
CMPQ R11, $0x20 CMPQ R11, $0x20
@ -839,7 +823,7 @@ loop_condition:
RET RET
// Report error // Report error
error_max_decoded_size_exeeded: error_max_decoded_size_exceeded:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ $-1, CX MOVQ $-1, CX
MOVQ CX, 40(AX) MOVQ CX, 40(AX)


@ -82,8 +82,9 @@ type blockDec struct {
err error err error
// Check against this crc // Check against this crc, if hasCRC is true.
checkCRC []byte checkCRC uint32
hasCRC bool
// Frame to use for singlethreaded decoding. // Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame. // Should not be used by the decoder itself since parent may be another frame.
@ -191,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
} }
// Read block data. // Read block data.
if cap(b.dataStorage) < cSize { if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
// byteBuf doesn't need a destination buffer.
if b.lowMem || cSize > maxCompressedBlockSize { if b.lowMem || cSize > maxCompressedBlockSize {
b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else { } else {
b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
} }
} }
if cap(b.dst) <= maxSize {
b.dst = make([]byte, 0, maxSize+1)
}
b.data, err = br.readBig(cSize, b.dataStorage) b.data, err = br.readBig(cSize, b.dataStorage)
if err != nil { if err != nil {
if debugDecoder { if debugDecoder {
@ -209,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
} }
return err return err
} }
if cap(b.dst) <= maxSize {
b.dst = make([]byte, 0, maxSize+1)
}
return nil return nil
} }


@ -4,7 +4,6 @@
package zstd package zstd
import ( import (
"bytes"
"encoding/binary" "encoding/binary"
"errors" "errors"
"io" "io"
@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error {
} }
h.HeaderSize += 4 h.HeaderSize += 4
b, in := in[:4], in[4:] b, in := in[:4], in[4:]
if !bytes.Equal(b, frameMagic) { if string(b) != frameMagic {
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
return ErrMagicMismatch return ErrMagicMismatch
} }
if len(in) < 4 { if len(in) < 4 {
@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error {
} }
b, in = in[:size], in[size:] b, in = in[:size], in[size:]
h.HeaderSize += int(size) h.HeaderSize += int(size)
switch size { switch len(b) {
case 1: case 1:
h.DictionaryID = uint32(b[0]) h.DictionaryID = uint32(b[0])
case 2: case 2:
@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error {
} }
b, in = in[:fcsSize], in[fcsSize:] b, in = in[:fcsSize], in[fcsSize:]
h.HeaderSize += int(fcsSize) h.HeaderSize += int(fcsSize)
switch fcsSize { switch len(b) {
case 1: case 1:
h.FrameContentSize = uint64(b[0]) h.FrameContentSize = uint64(b[0])
case 2: case 2:


@ -5,7 +5,6 @@
package zstd package zstd
import ( import (
"bytes"
"context" "context"
"encoding/binary" "encoding/binary"
"io" "io"
@ -41,8 +40,7 @@ type Decoder struct {
frame *frameDec frame *frameDec
// Custom dictionaries. // Custom dictionaries.
// Always uses copies. dicts map[uint32]*dict
dicts map[uint32]dict
// streamWg is the waitgroup for all streams // streamWg is the waitgroup for all streams
streamWg sync.WaitGroup streamWg sync.WaitGroup
@ -104,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
} }
// Transfer option dicts. // Transfer option dicts.
d.dicts = make(map[uint32]dict, len(d.o.dicts)) d.dicts = make(map[uint32]*dict, len(d.o.dicts))
for _, dc := range d.o.dicts { for _, dc := range d.o.dicts {
d.dicts[dc.id] = dc d.dicts[dc.id] = dc
} }
@ -342,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
} }
return dst, err return dst, err
} }
if frame.DictionaryID != nil { if err = d.setDict(frame); err != nil {
dict, ok := d.dicts[*frame.DictionaryID] return nil, err
if !ok {
return nil, ErrUnknownDictionary
}
if debugDecoder {
println("setting dict", frame.DictionaryID)
}
frame.history.setDict(&dict)
} }
if frame.WindowSize > d.o.maxWindowSize { if frame.WindowSize > d.o.maxWindowSize {
if debugDecoder { if debugDecoder {
@ -459,7 +450,11 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
} }
if !d.o.ignoreChecksum && len(next.b) > 0 { if d.o.ignoreChecksum {
return true
}
if len(next.b) > 0 {
n, err := d.current.crc.Write(next.b) n, err := d.current.crc.Write(next.b)
if err == nil { if err == nil {
if n != len(next.b) { if n != len(next.b) {
@ -467,18 +462,16 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
} }
} }
} }
if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { if next.err == nil && next.d != nil && next.d.hasCRC {
got := d.current.crc.Sum64() got := uint32(d.current.crc.Sum64())
var tmp [4]byte if got != next.d.checkCRC {
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
if debugDecoder { if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
} }
d.current.err = ErrCRCMismatch d.current.err = ErrCRCMismatch
} else { } else {
if debugDecoder { if debugDecoder {
println("CRC ok", tmp[:]) printf("CRC ok %08x\n", got)
} }
} }
} }
@ -494,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) {
if !d.syncStream.inFrame { if !d.syncStream.inFrame {
d.frame.history.reset() d.frame.history.reset()
d.current.err = d.frame.reset(&d.syncStream.br) d.current.err = d.frame.reset(&d.syncStream.br)
if d.current.err == nil {
d.current.err = d.setDict(d.frame)
}
if d.current.err != nil { if d.current.err != nil {
return false return false
} }
if d.frame.DictionaryID != nil {
dict, ok := d.dicts[*d.frame.DictionaryID]
if !ok {
d.current.err = ErrUnknownDictionary
return false
} else {
d.frame.history.setDict(&dict)
}
}
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
d.current.err = ErrDecoderSizeExceeded d.current.err = ErrDecoderSizeExceeded
return false return false
@ -864,13 +851,8 @@ decodeStream:
if debugDecoder && err != nil { if debugDecoder && err != nil {
println("Frame decoder returned", err) println("Frame decoder returned", err)
} }
if err == nil && frame.DictionaryID != nil { if err == nil {
dict, ok := d.dicts[*frame.DictionaryID] err = d.setDict(frame)
if !ok {
err = ErrUnknownDictionary
} else {
frame.history.setDict(&dict)
}
} }
if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
if debugDecoder { if debugDecoder {
@ -918,18 +900,22 @@ decodeStream:
println("next block returned error:", err) println("next block returned error:", err)
} }
dec.err = err dec.err = err
dec.checkCRC = nil dec.hasCRC = false
if dec.Last && frame.HasCheckSum && err == nil { if dec.Last && frame.HasCheckSum && err == nil {
crc, err := frame.rawInput.readSmall(4) crc, err := frame.rawInput.readSmall(4)
if err != nil { if len(crc) < 4 {
if err == nil {
err = io.ErrUnexpectedEOF
}
println("CRC missing?", err) println("CRC missing?", err)
dec.err = err dec.err = err
} } else {
var tmp [4]byte dec.checkCRC = binary.LittleEndian.Uint32(crc)
copy(tmp[:], crc) dec.hasCRC = true
dec.checkCRC = tmp[:] if debugDecoder {
if debugDecoder { printf("found crc to check: %08x\n", dec.checkCRC)
println("found crc to check:", dec.checkCRC) }
} }
} }
err = dec.err err = dec.err
@ -948,3 +934,20 @@ decodeStream:
hist.reset() hist.reset()
d.frame.history.b = frameHistCache d.frame.history.b = frameHistCache
} }
func (d *Decoder) setDict(frame *frameDec) (err error) {
dict, ok := d.dicts[frame.DictionaryID]
if ok {
if debugDecoder {
println("setting dict", frame.DictionaryID)
}
frame.history.setDict(dict)
} else if frame.DictionaryID != 0 {
// A zero or missing dictionary id is ambiguous:
// either dictionary zero, or no dictionary. In particular,
// zstd --patch-from uses this id for the source file,
// so only return an error if the dictionary id is not zero.
err = ErrUnknownDictionary
}
return err
}
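The setDict helper above only rejects unknown non-zero dictionary IDs; per its comment, that keeps `zstd --patch-from` output decodable when the source file is registered as a raw dictionary, since zstd records a zero (or omitted) dictionary ID for that case. A hedged sketch of how a caller might do that with the WithDecoderDictRaw option added elsewhere in this change; the file names are placeholders, and very large patches may additionally need a larger decoder window (WithDecoderMaxWindow):

```go
package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// "base.bin" stands in for the --patch-from source file.
	base, err := os.ReadFile("base.bin")
	if err != nil {
		panic(err)
	}
	patch, err := os.ReadFile("new.bin.zst") // output of: zstd --patch-from=base.bin new.bin
	if err != nil {
		panic(err)
	}

	// ID 0 is what setDict deliberately tolerates for this case.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(0, base))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(patch, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("reconstructed", len(out), "bytes")
}
```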


@ -6,6 +6,8 @@ package zstd
import ( import (
"errors" "errors"
"fmt"
"math/bits"
"runtime" "runtime"
) )
@ -18,7 +20,7 @@ type decoderOptions struct {
concurrent int concurrent int
maxDecodedSize uint64 maxDecodedSize uint64
maxWindowSize uint64 maxWindowSize uint64
dicts []dict dicts []*dict
ignoreChecksum bool ignoreChecksum bool
limitToCap bool limitToCap bool
decodeBufsBelow int decodeBufsBelow int
@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption {
} }
// WithDecoderDicts allows to register one or more dictionaries for the decoder. // WithDecoderDicts allows to register one or more dictionaries for the decoder.
// If several dictionaries with the same ID is provided the last one will be used. //
// Each slice in dict must be in the [dictionary format] produced by
// "zstd --train" from the Zstandard reference implementation.
//
// If several dictionaries with the same ID are provided, the last one will be used.
//
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithDecoderDicts(dicts ...[]byte) DOption { func WithDecoderDicts(dicts ...[]byte) DOption {
return func(o *decoderOptions) error { return func(o *decoderOptions) error {
for _, b := range dicts { for _, b := range dicts {
@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
if err != nil { if err != nil {
return err return err
} }
o.dicts = append(o.dicts, *d) o.dicts = append(o.dicts, d)
} }
return nil return nil
} }
} }
// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
// The slice content can be arbitrary data.
func WithDecoderDictRaw(id uint32, content []byte) DOption {
return func(o *decoderOptions) error {
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
}
o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
return nil
}
}
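Together with the updated WithDecoderDicts documentation above, the raw variant gives two registration paths: trained dictionaries in the reference format, and arbitrary content under an explicit ID. A minimal, hedged sketch of the trained-dictionary path on the streaming reader; "app.dict" and "payload.zst" are placeholder names:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// "app.dict" is a placeholder for a dictionary produced by `zstd --train`.
	// Several dictionaries may be registered; the decoder picks the one whose
	// ID matches the frame being decoded.
	dictData, err := os.ReadFile("app.dict")
	if err != nil {
		panic(err)
	}
	in, err := os.Open("payload.zst")
	if err != nil {
		panic(err)
	}
	defer in.Close()

	dec, err := zstd.NewReader(in, zstd.WithDecoderDicts(dictData))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}
```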
// WithDecoderMaxWindow allows to set a maximum window size for decodes. // WithDecoderMaxWindow allows to set a maximum window size for decodes.
// This allows rejecting packets that will cause big memory usage. // This allows rejecting packets that will cause big memory usage.
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.


@ -1,7 +1,6 @@
package zstd package zstd
import ( import (
"bytes"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
@ -20,7 +19,10 @@ type dict struct {
content []byte content []byte
} }
var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} const dictMagic = "\x37\xa4\x30\xec"
// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
const dictMaxLength = 1 << 31
// ID returns the dictionary id or 0 if d is nil. // ID returns the dictionary id or 0 if d is nil.
func (d *dict) ID() uint32 { func (d *dict) ID() uint32 {
@ -50,7 +52,7 @@ func loadDict(b []byte) (*dict, error) {
ofDec: sequenceDec{fse: &fseDecoder{}}, ofDec: sequenceDec{fse: &fseDecoder{}},
mlDec: sequenceDec{fse: &fseDecoder{}}, mlDec: sequenceDec{fse: &fseDecoder{}},
} }
if !bytes.Equal(b[:4], dictMagic[:]) { if string(b[:4]) != dictMagic {
return nil, ErrMagicMismatch return nil, ErrMagicMismatch
} }
d.id = binary.LittleEndian.Uint32(b[4:8]) d.id = binary.LittleEndian.Uint32(b[4:8])


@ -16,6 +16,7 @@ type fastBase struct {
cur int32 cur int32
// maximum offset. Should be at least 2x block size. // maximum offset. Should be at least 2x block size.
maxMatchOff int32 maxMatchOff int32
bufferReset int32
hist []byte hist []byte
crc *xxhash.Digest crc *xxhash.Digest
tmp [8]byte tmp [8]byte
@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc {
} }
func (e *fastBase) addBlock(src []byte) int32 { func (e *fastBase) addBlock(src []byte) int32 {
if debugAsserts && e.cur > bufferReset { if debugAsserts && e.cur > e.bufferReset {
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
} }
// check if we have space already // check if we have space already
if len(e.hist)+len(src) > cap(e.hist) { if len(e.hist)+len(src) > cap(e.hist) {
@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
} }
} }
a := src[s:] return int32(matchLen(src[s:], src[t:]))
b := src[t:]
b = b[:len(a)]
end := int32((len(a) >> 3) << 3)
for i := int32(0); i < end; i += 8 {
if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
return i + int32(bits.TrailingZeros64(diff)>>3)
}
}
a = a[end:]
b = b[end:]
for i := range a {
if a[i] != b[i] {
return int32(i) + end
}
}
return int32(len(a)) + end
} }
// Reset the encoding table. // Reset the encoding table.
@ -171,7 +155,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
// We offset current position so everything will be out of reach. // We offset current position so everything will be out of reach.
// If above reset line, history will be purged. // If above reset line, history will be purged.
if e.cur < bufferReset { if e.cur < e.bufferReset {
e.cur += e.maxMatchOff + int32(len(e.hist)) e.cur += e.maxMatchOff + int32(len(e.hist))
} }
e.hist = e.hist[:0] e.hist = e.hist[:0]


@ -85,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [bestShortTableSize]prevEntry{}
e.table[i] = prevEntry{} e.longTable = [bestLongTableSize]prevEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = prevEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
@ -193,8 +189,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
bestOf := func(a, b match) match { bestOf := func(a, b *match) *match {
if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
return a return a
} }
return b return b
@ -220,22 +216,26 @@ encodeLoop:
return m return m
} }
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
if canRepeat && best.length < goodEnough { if canRepeat && best.length < goodEnough {
cv32 := uint32(cv >> 8) cv32 := uint32(cv >> 8)
spp := s + 1 spp := s + 1
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) m1 := matchAt(spp-offset1, spp, cv32, 1)
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) m2 := matchAt(spp-offset2, spp, cv32, 2)
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) m3 := matchAt(spp-offset3, spp, cv32, 3)
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
if best.length > 0 { if best.length > 0 {
cv32 = uint32(cv >> 24) cv32 = uint32(cv >> 24)
spp += 2 spp += 2
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) m1 := matchAt(spp-offset1, spp, cv32, 1)
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) m2 := matchAt(spp-offset2, spp, cv32, 2)
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) m3 := matchAt(spp-offset3, spp, cv32, 3)
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
} }
} }
// Load next and check... // Load next and check...
@ -262,26 +262,33 @@ encodeLoop:
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1 // Short at s+1
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
// Long at s+1, s+2 // Long at s+1, s+2
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
if false { if false {
// Short at s+3. // Short at s+3.
// Too often worse... // Too often worse...
best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
best = bestOf(best, &m)
} }
// See if we can find a better match by checking where the current best ends. // See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match. // Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit { if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL] candidateEnd := e.longTable[nextHashL]
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { // Start check at a fixed offset to allow for a few mismatches.
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) // For this compression level 2 yields the best results.
if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { const skipBeginning = 2
bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
bestEnd := bestOf(best, &m)
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
bestEnd = bestOf(bestEnd, &m)
} }
best = bestEnd best = bestEnd
} }


@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [betterShortTableSize]tableEntry{}
e.table[i] = tableEntry{} e.longTable = [betterLongTableSize]prevEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = prevEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
@ -587,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}


@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [dFastShortTableSize]tableEntry{}
e.table[i] = tableEntry{} e.longTable = [dFastLongTableSize]tableEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = tableEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
if e.cur >= bufferReset { if e.cur >= e.bufferReset {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}
} }
@ -685,7 +681,7 @@ encodeLoop:
} }
// We do not store history, so we must offset e.cur to avoid false matches for next user. // We do not store history, so we must offset e.cur to avoid false matches for next user.
if e.cur < bufferReset { if e.cur < e.bufferReset {
e.cur += int32(len(src)) e.cur += int32(len(src))
} }
} }
@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}


@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}
@ -310,7 +310,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
} }
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
if e.cur >= bufferReset { if e.cur >= e.bufferReset {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}
} }
@ -538,7 +538,7 @@ encodeLoop:
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
// We do not store history, so we must offset e.cur to avoid false matches for next user. // We do not store history, so we must offset e.cur to avoid false matches for next user.
if e.cur < bufferReset { if e.cur < e.bufferReset {
e.cur += int32(len(src)) e.cur += int32(len(src))
} }
} }
@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
return return
} }
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [tableSize]tableEntry{}
e.table[i] = tableEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }


@ -8,6 +8,7 @@ import (
"crypto/rand" "crypto/rand"
"fmt" "fmt"
"io" "io"
"math"
rdebug "runtime/debug" rdebug "runtime/debug"
"sync" "sync"
@ -639,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
} }
return dst return dst
} }
// MaxEncodedSize returns the expected maximum
// size of an encoded block or stream.
func (e *Encoder) MaxEncodedSize(size int) int {
frameHeader := 4 + 2 // magic + frame header & window descriptor
if e.o.dict != nil {
frameHeader += 4
}
// Frame content size:
if size < 256 {
frameHeader++
} else if size < 65536+256 {
frameHeader += 2
} else if size < math.MaxInt32 {
frameHeader += 4
} else {
frameHeader += 8
}
// Final crc
if e.o.crc {
frameHeader += 4
}
// Max overhead is 3 bytes/block.
// There cannot be 0 blocks.
blocks := (size + e.o.blockSize) / e.o.blockSize
// Combine, add padding.
maxSz := frameHeader + 3*blocks + size
if e.o.pad > 1 {
maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
}
return maxSz
}
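The new MaxEncodedSize makes it possible to size the destination passed to EncodeAll up front, so the encoder should not have to grow it. A minimal sketch; the bound already accounts for the frame header, per-block overhead, optional CRC and padding as computed above:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := []byte("example payload that we want to compress in one shot")

	// Pre-size dst so EncodeAll should not need to reallocate it.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
	dst = enc.EncodeAll(src, dst)
	fmt.Printf("compressed %d -> %d bytes (cap %d)\n", len(src), len(dst), cap(dst))
}
```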


@ -3,6 +3,8 @@ package zstd
import ( import (
"errors" "errors"
"fmt" "fmt"
"math"
"math/bits"
"runtime" "runtime"
"strings" "strings"
) )
@ -47,22 +49,22 @@ func (o encoderOptions) encoder() encoder {
switch o.level { switch o.level {
case SpeedFastest: case SpeedFastest:
if o.dict != nil { if o.dict != nil {
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
} }
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedDefault: case SpeedDefault:
if o.dict != nil { if o.dict != nil {
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
} }
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
case SpeedBetterCompression: case SpeedBetterCompression:
if o.dict != nil { if o.dict != nil {
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
} }
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedBestCompression: case SpeedBestCompression:
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
} }
panic("unknown compression level") panic("unknown compression level")
} }
@ -304,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption {
} }
// WithEncoderDict allows to register a dictionary that will be used for the encode. // WithEncoderDict allows to register a dictionary that will be used for the encode.
//
// The slice dict must be in the [dictionary format] produced by
// "zstd --train" from the Zstandard reference implementation.
//
// The encoder *may* choose to use no dictionary instead for certain payloads. // The encoder *may* choose to use no dictionary instead for certain payloads.
//
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithEncoderDict(dict []byte) EOption { func WithEncoderDict(dict []byte) EOption {
return func(o *encoderOptions) error { return func(o *encoderOptions) error {
d, err := loadDict(dict) d, err := loadDict(dict)
@ -315,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption {
return nil return nil
} }
} }
// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
//
// The slice content may contain arbitrary data. It will be used as an initial
// history.
func WithEncoderDictRaw(id uint32, content []byte) EOption {
return func(o *encoderOptions) error {
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
}
o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
return nil
}
}
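WithEncoderDictRaw and its decoder counterpart pair naturally when both sides already share some content that is not a trained dictionary. A hedged round-trip sketch; the shared bytes and the ID 1 are arbitrary choices for illustration:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	shared := []byte("some bytes both sides already have") // arbitrary content, not a trained dictionary
	const id = 1                                           // arbitrary non-zero dictionary ID

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(id, shared))
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	compressed := enc.EncodeAll([]byte("some bytes both sides already have, plus new data"), nil)

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(id, shared))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain))
}
```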


@ -5,7 +5,7 @@
package zstd package zstd
import ( import (
"bytes" "encoding/binary"
"encoding/hex" "encoding/hex"
"errors" "errors"
"io" "io"
@ -29,7 +29,7 @@ type frameDec struct {
FrameContentSize uint64 FrameContentSize uint64
DictionaryID *uint32 DictionaryID uint32
HasCheckSum bool HasCheckSum bool
SingleSegment bool SingleSegment bool
} }
@ -43,9 +43,9 @@ const (
MaxWindowSize = 1 << 29 MaxWindowSize = 1 << 29
) )
var ( const (
frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} frameMagic = "\x28\xb5\x2f\xfd"
skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} skippableFrameMagic = "\x2a\x4d\x18"
) )
func newFrameDec(o decoderOptions) *frameDec { func newFrameDec(o decoderOptions) *frameDec {
@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error {
copy(signature[1:], b) copy(signature[1:], b)
} }
if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
if debugDecoder { if debugDecoder {
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
} }
// Break if not skippable frame. // Break if not skippable frame.
break break
@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error {
return err return err
} }
} }
if !bytes.Equal(signature[:], frameMagic) { if string(signature[:]) != frameMagic {
if debugDecoder { if debugDecoder {
println("Got magic numbers: ", signature, "want:", frameMagic) println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
} }
return ErrMagicMismatch return ErrMagicMismatch
} }
@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error {
// Read Dictionary_ID // Read Dictionary_ID
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
d.DictionaryID = nil d.DictionaryID = 0
if size := fhd & 3; size != 0 { if size := fhd & 3; size != 0 {
if size == 3 { if size == 3 {
size = 4 size = 4
@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error {
return err return err
} }
var id uint32 var id uint32
switch size { switch len(b) {
case 1: case 1:
id = uint32(b[0]) id = uint32(b[0])
case 2: case 2:
@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error {
if debugDecoder { if debugDecoder {
println("Dict size", size, "ID:", id) println("Dict size", size, "ID:", id)
} }
if id > 0 { d.DictionaryID = id
// ID 0 means "sorry, no dictionary anyway".
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
d.DictionaryID = &id
}
} }
// Read Frame_Content_Size // Read Frame_Content_Size
@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error {
println("Reading Frame content", err) println("Reading Frame content", err)
return err return err
} }
switch fcsSize { switch len(b) {
case 1: case 1:
d.FrameContentSize = uint64(b[0]) d.FrameContentSize = uint64(b[0])
case 2: case 2:
@ -305,7 +301,7 @@ func (d *frameDec) checkCRC() error {
} }
// We can overwrite upper tmp now // We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4) buf, err := d.rawInput.readSmall(4)
if err != nil { if err != nil {
println("CRC missing?", err) println("CRC missing?", err)
return err return err
@ -315,22 +311,17 @@ func (d *frameDec) checkCRC() error {
return nil return nil
} }
var tmp [4]byte want := binary.LittleEndian.Uint32(buf[:4])
got := d.crc.Sum64() got := uint32(d.crc.Sum64())
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
if !bytes.Equal(tmp[:], want) { if got != want {
if debugDecoder { if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want) printf("CRC check failed: got %08x, want %08x\n", got, want)
} }
return ErrCRCMismatch return ErrCRCMismatch
} }
if debugDecoder { if debugDecoder {
println("CRC ok", tmp[:]) printf("CRC ok %08x\n", got)
} }
return nil return nil
} }


@ -2,12 +2,7 @@
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go high-quality hashing algorithm that is much faster than anything in the Go
standard library. standard library.
@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64 func (*Digest) Sum64() uint64
``` ```
This implementation provides a fast pure-Go implementation and an even faster The package is written with optimized pure Go and also contains even faster
assembly implementation for amd64. assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.
[xxHash]: http://cyan4973.github.io/xxHash/
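Since the API excerpt above only lists the Digest methods, here is a minimal usage sketch against the upstream module this vendored copy points to (github.com/cespare/xxhash/v2); the result is the same whether the amd64/arm64 assembly or the `purego` code path is built:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64String("hello, xxhash"))

	// Streaming via the Digest (hash.Hash64) API shown in the README;
	// this prints the same value as the one-shot call above.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("xxhash"))
	fmt.Printf("%016x\n", d.Sum64())
}
```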
## Compatibility
This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:
* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later
I recommend using the latest release of Go.
## Benchmarks ## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64. implementations of Sum64.
| input size | purego | asm | | input size | purego | asm |
| --- | --- | --- | | ---------- | --------- | --------- |
| 5 B | 979.66 MB/s | 1291.17 MB/s | | 4 B | 1.3 GB/s | 1.2 GB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s | | 16 B | 2.9 GB/s | 3.5 GB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s | | 100 B | 6.9 GB/s | 8.1 GB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s | | 4 KB | 11.7 GB/s | 16.7 GB/s |
| 10 MB | 12.0 GB/s | 17.3 GB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
the following commands under Go 1.11.2: CPU using the following commands under Go 1.19.2:
``` ```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
$ go test -benchtime 10s -bench '/xxhash,direct,bytes' benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
``` ```
## Projects using this package ## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb) - [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus) - [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache) - [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)


@ -18,19 +18,11 @@ const (
prime5 uint64 = 2870177450012600261 prime5 uint64 = 2870177450012600261
) )
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where // Store the primes in an array as well.
// possible in the Go code is worth a small (but measurable) performance boost //
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for // The consts are used when possible in Go code to avoid MOVs but we need a
// convenience in the Go code in a few places where we need to intentionally // contiguous array of the assembly code.
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Digest implements hash.Hash64. // Digest implements hash.Hash64.
type Digest struct { type Digest struct {
@ -52,10 +44,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused. // Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() { func (d *Digest) Reset() {
d.v1 = prime1v + prime2 d.v1 = primes[0] + prime2
d.v2 = prime2 d.v2 = prime2
d.v3 = 0 d.v3 = 0
d.v4 = -prime1v d.v4 = -primes[0]
d.total = 0 d.total = 0
d.n = 0 d.n = 0
} }
@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b) n = len(b)
d.total += uint64(n) d.total += uint64(n)
memleft := d.mem[d.n&(len(d.mem)-1):]
if d.n+n < 32 { if d.n+n < 32 {
// This new data doesn't even fill the current block. // This new data doesn't even fill the current block.
copy(d.mem[d.n:], b) copy(memleft, b)
d.n += n d.n += n
return return
} }
if d.n > 0 { if d.n > 0 {
// Finish off the partial block. // Finish off the partial block.
copy(d.mem[d.n:], b) c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8])) d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16])) d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24])) d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32])) d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:] b = b[c:]
d.n = 0 d.n = 0
} }
@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total h += d.total
i, end := 0, d.n b := d.mem[:d.n&(len(d.mem)-1)]
for ; i+8 <= end; i += 8 { for ; len(b) >= 8; b = b[8:] {
k1 := round(0, u64(d.mem[i:i+8])) k1 := round(0, u64(b[:8]))
h ^= k1 h ^= k1
h = rol27(h)*prime1 + prime4 h = rol27(h)*prime1 + prime4
} }
if i+4 <= end { if len(b) >= 4 {
h ^= uint64(u32(d.mem[i:i+4])) * prime1 h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3 h = rol23(h)*prime2 + prime3
i += 4 b = b[4:]
} }
for i < end { for ; len(b) > 0; b = b[1:] {
h ^= uint64(d.mem[i]) * prime5 h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1 h = rol11(h) * prime1
i++
} }
h ^= h >> 33 h ^= h >> 33


@ -1,3 +1,4 @@
//go:build !appengine && gc && !purego && !noasm
// +build !appengine // +build !appengine
// +build gc // +build gc
// +build !purego // +build !purego
@ -5,212 +6,205 @@
#include "textflag.h" #include "textflag.h"
// Register allocation: // Registers:
// AX h #define h AX
// SI pointer to advance through b #define d AX
// DX n #define p SI // pointer to advance through b
// BX loop end #define n DX
// R8 v1, k1 #define end BX // loop end
// R9 v2 #define v1 R8
// R10 v3 #define v2 R9
// R11 v4 #define v3 R10
// R12 tmp #define v4 R11
// R13 prime1v #define x R12
// R14 prime2v #define prime1 R13
// DI prime4v #define prime2 R14
#define prime4 DI
// round reads from and advances the buffer pointer in SI. #define round(acc, x) \
// It assumes that R13 has prime1v and R14 has prime2v. IMULQ prime2, x \
#define round(r) \ ADDQ x, acc \
MOVQ (SI), R12 \ ROLQ $31, acc \
ADDQ $8, SI \ IMULQ prime1, acc
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val. // round0 performs the operation x = round(0, x).
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. #define round0(x) \
#define mergeRound(acc, val) \ IMULQ prime2, x \
IMULQ R14, val \ ROLQ $31, x \
ROLQ $31, val \ IMULQ prime1, x
IMULQ R13, val \
XORQ val, acc \ // mergeRound applies a merge round on the two registers acc and x.
IMULQ R13, acc \ // It assumes that prime1, prime2, and prime4 have been loaded.
ADDQ DI, acc #define mergeRound(acc, x) \
round0(x) \
XORQ x, acc \
IMULQ prime1, acc \
ADDQ prime4, acc
// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that there is at least one block
// to process.
#define blockLoop() \
loop: \
MOVQ +0(p), x \
round(v1, x) \
MOVQ +8(p), x \
round(v2, x) \
MOVQ +16(p), x \
round(v3, x) \
MOVQ +24(p), x \
round(v4, x) \
ADDQ $32, p \
CMPQ p, end \
JLE loop
// func Sum64(b []byte) uint64 // func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32 TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes. // Load fixed primes.
MOVQ ·prime1v(SB), R13 MOVQ ·primes+0(SB), prime1
MOVQ ·prime2v(SB), R14 MOVQ ·primes+8(SB), prime2
MOVQ ·prime4v(SB), DI MOVQ ·primes+24(SB), prime4
// Load slice. // Load slice.
MOVQ b_base+0(FP), SI MOVQ b_base+0(FP), p
MOVQ b_len+8(FP), DX MOVQ b_len+8(FP), n
LEAQ (SI)(DX*1), BX LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32. // The first loop limit will be len(b)-32.
SUBQ $32, BX SUBQ $32, end
// Check whether we have at least one block. // Check whether we have at least one block.
CMPQ DX, $32 CMPQ n, $32
JLT noBlocks JLT noBlocks
// Set up initial state (v1, v2, v3, v4). // Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8 MOVQ prime1, v1
ADDQ R14, R8 ADDQ prime2, v1
MOVQ R14, R9 MOVQ prime2, v2
XORQ R10, R10 XORQ v3, v3
XORQ R11, R11 XORQ v4, v4
SUBQ R13, R11 SUBQ prime1, v4
// Loop until SI > BX. blockLoop()
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX MOVQ v1, h
JLE blockLoop ROLQ $1, h
MOVQ v2, x
ROLQ $7, x
ADDQ x, h
MOVQ v3, x
ROLQ $12, x
ADDQ x, h
MOVQ v4, x
ROLQ $18, x
ADDQ x, h
MOVQ R8, AX mergeRound(h, v1)
ROLQ $1, AX mergeRound(h, v2)
MOVQ R9, R12 mergeRound(h, v3)
ROLQ $7, R12 mergeRound(h, v4)
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
JMP afterBlocks JMP afterBlocks
noBlocks: noBlocks:
MOVQ ·prime5v(SB), AX MOVQ ·primes+32(SB), h
afterBlocks: afterBlocks:
ADDQ DX, AX ADDQ n, h
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. ADDQ $24, end
ADDQ $24, BX CMPQ p, end
JG try4
CMPQ SI, BX loop8:
JG fourByte MOVQ (p), x
ADDQ $8, p
round0(x)
XORQ x, h
ROLQ $27, h
IMULQ prime1, h
ADDQ prime4, h
wordLoop: CMPQ p, end
// Calculate k1. JLE loop8
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
XORQ R8, AX try4:
ROLQ $27, AX ADDQ $4, end
IMULQ R13, AX CMPQ p, end
ADDQ DI, AX JG try1
CMPQ SI, BX MOVL (p), x
JLE wordLoop ADDQ $4, p
IMULQ prime1, x
XORQ x, h
fourByte: ROLQ $23, h
ADDQ $4, BX IMULQ prime2, h
CMPQ SI, BX ADDQ ·primes+16(SB), h
JG singles
MOVL (SI), R8 try1:
ADDQ $4, SI ADDQ $4, end
IMULQ R13, R8 CMPQ p, end
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ SI, BX
JGE finalize JGE finalize
singlesLoop: loop1:
MOVBQZX (SI), R12 MOVBQZX (p), x
ADDQ $1, SI ADDQ $1, p
IMULQ ·prime5v(SB), R12 IMULQ ·primes+32(SB), x
XORQ R12, AX XORQ x, h
ROLQ $11, h
IMULQ prime1, h
ROLQ $11, AX CMPQ p, end
IMULQ R13, AX JL loop1
CMPQ SI, BX
JL singlesLoop
finalize: finalize:
MOVQ AX, R12 MOVQ h, x
SHRQ $33, R12 SHRQ $33, x
XORQ R12, AX XORQ x, h
IMULQ R14, AX IMULQ prime2, h
MOVQ AX, R12 MOVQ h, x
SHRQ $29, R12 SHRQ $29, x
XORQ R12, AX XORQ x, h
IMULQ ·prime3v(SB), AX IMULQ ·primes+16(SB), h
MOVQ AX, R12 MOVQ h, x
SHRQ $32, R12 SHRQ $32, x
XORQ R12, AX XORQ x, h
MOVQ AX, ret+24(FP) MOVQ h, ret+24(FP)
RET RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int // func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40 TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round. // Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13 MOVQ ·primes+0(SB), prime1
MOVQ ·prime2v(SB), R14 MOVQ ·primes+8(SB), prime2
// Load slice. // Load slice.
MOVQ b_base+8(FP), SI MOVQ b_base+8(FP), p
MOVQ b_len+16(FP), DX MOVQ b_len+16(FP), n
LEAQ (SI)(DX*1), BX LEAQ (p)(n*1), end
SUBQ $32, BX SUBQ $32, end
// Load vN from d. // Load vN from d.
MOVQ d+0(FP), AX MOVQ s+0(FP), d
MOVQ 0(AX), R8 // v1 MOVQ 0(d), v1
MOVQ 8(AX), R9 // v2 MOVQ 8(d), v2
MOVQ 16(AX), R10 // v3 MOVQ 16(d), v3
MOVQ 24(AX), R11 // v4 MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is // We don't need to check the loop condition here; this function is
// always called with at least one block of data to process. // always called with at least one block of data to process.
blockLoop: blockLoop()
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
// Copy vN back to d. // Copy vN back to d.
MOVQ R8, 0(AX) MOVQ v1, 0(d)
MOVQ R9, 8(AX) MOVQ v2, 8(d)
MOVQ R10, 16(AX) MOVQ v3, 16(d)
MOVQ R11, 24(AX) MOVQ v4, 24(d)
// The number of bytes written is SI minus the old base pointer. // The number of bytes written is p minus the old base pointer.
SUBQ b_base+8(FP), SI SUBQ b_base+8(FP), p
MOVQ SI, ret+32(FP) MOVQ p, ret+32(FP)
RET RET
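As a reading aid, the round, round0, and mergeRound macros above correspond to the following pure-Go operations (a sketch using the standard xxHash64 primes; the function names are illustrative, not part of the package API):

```go
package main

import "math/bits"

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime4 uint64 = 9650029242287828579
)

// round mirrors the asm macro: acc += x*prime2; acc = rotl31(acc); acc *= prime1.
func round(acc, x uint64) uint64 {
	acc += x * prime2
	acc = bits.RotateLeft64(acc, 31)
	return acc * prime1
}

// round0 is round with a zero accumulator, as used for the tail words.
func round0(x uint64) uint64 { return round(0, x) }

// mergeRound folds one of v1..v4 into the running hash h.
func mergeRound(h, v uint64) uint64 {
	h ^= round0(v)
	return h*prime1 + prime4
}

func main() {
	_ = mergeRound(round(prime1+prime2, 42), 7) // exercise the helpers
}
```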

View File

@ -1,13 +1,17 @@
// +build gc,!purego,!noasm //go:build !appengine && gc && !purego && !noasm
// +build !appengine
// +build gc
// +build !purego
// +build !noasm
#include "textflag.h" #include "textflag.h"
// Register allocation. // Registers:
#define digest R1 #define digest R1
#define h R2 // Return value. #define h R2 // return value
#define p R3 // Input pointer. #define p R3 // input pointer
#define len R4 #define n R4 // input length
#define nblocks R5 // len / 32. #define nblocks R5 // n / 32
#define prime1 R7 #define prime1 R7
#define prime2 R8 #define prime2 R8
#define prime3 R9 #define prime3 R9
@ -25,60 +29,52 @@
#define round(acc, x) \ #define round(acc, x) \
MADD prime2, acc, x, acc \ MADD prime2, acc, x, acc \
ROR $64-31, acc \ ROR $64-31, acc \
MUL prime1, acc \ MUL prime1, acc
// x = round(0, x). // round0 performs the operation x = round(0, x).
#define round0(x) \ #define round0(x) \
MUL prime2, x \ MUL prime2, x \
ROR $64-31, x \ ROR $64-31, x \
MUL prime1, x \ MUL prime1, x
#define mergeRound(x) \ #define mergeRound(acc, x) \
round0(x) \ round0(x) \
EOR x, h \ EOR x, acc \
MADD h, prime4, prime1, h \ MADD acc, prime4, prime1, acc
// Update v[1-4] with 32-byte blocks. Assumes len >= 32. // blockLoop processes as many 32-byte blocks as possible,
#define blocksLoop() \ // updating v1, v2, v3, and v4. It assumes that n >= 32.
LSR $5, len, nblocks \ #define blockLoop() \
PCALIGN $16 \ LSR $5, n, nblocks \
loop: \ PCALIGN $16 \
LDP.P 32(p), (x1, x2) \ loop: \
round(v1, x1) \ LDP.P 16(p), (x1, x2) \
LDP -16(p), (x3, x4) \ LDP.P 16(p), (x3, x4) \
round(v2, x2) \ round(v1, x1) \
SUB $1, nblocks \ round(v2, x2) \
round(v3, x3) \ round(v3, x3) \
round(v4, x4) \ round(v4, x4) \
CBNZ nblocks, loop \ SUB $1, nblocks \
CBNZ nblocks, loop
// The primes are repeated here to ensure that they're stored
// in a contiguous array, so we can load them with LDP.
DATA primes<> +0(SB)/8, $11400714785074694791
DATA primes<> +8(SB)/8, $14029467366897019727
DATA primes<>+16(SB)/8, $1609587929392839161
DATA primes<>+24(SB)/8, $9650029242287828579
DATA primes<>+32(SB)/8, $2870177450012600261
GLOBL primes<>(SB), NOPTR+RODATA, $40
// func Sum64(b []byte) uint64 // func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
LDP b_base+0(FP), (p, len) LDP b_base+0(FP), (p, n)
LDP primes<> +0(SB), (prime1, prime2) LDP ·primes+0(SB), (prime1, prime2)
LDP primes<>+16(SB), (prime3, prime4) LDP ·primes+16(SB), (prime3, prime4)
MOVD primes<>+32(SB), prime5 MOVD ·primes+32(SB), prime5
CMP $32, len CMP $32, n
CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
BLO afterLoop BLT afterLoop
ADD prime1, prime2, v1 ADD prime1, prime2, v1
MOVD prime2, v2 MOVD prime2, v2
MOVD $0, v3 MOVD $0, v3
NEG prime1, v4 NEG prime1, v4
blocksLoop() blockLoop()
ROR $64-1, v1, x1 ROR $64-1, v1, x1
ROR $64-7, v2, x2 ROR $64-7, v2, x2
@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
ADD x3, x4 ADD x3, x4
ADD x2, x4, h ADD x2, x4, h
mergeRound(v1) mergeRound(h, v1)
mergeRound(v2) mergeRound(h, v2)
mergeRound(v3) mergeRound(h, v3)
mergeRound(v4) mergeRound(h, v4)
afterLoop: afterLoop:
ADD len, h ADD n, h
TBZ $4, len, try8 TBZ $4, n, try8
LDP.P 16(p), (x1, x2) LDP.P 16(p), (x1, x2)
round0(x1) round0(x1)
// NOTE: here and below, sequencing the EOR after the ROR (using a
// rotated register) is worth a small but measurable speedup for small
// inputs.
ROR $64-27, h ROR $64-27, h
EOR x1 @> 64-27, h, h EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h MADD h, prime4, prime1, h
round0(x2) round0(x2)
ROR $64-27, h ROR $64-27, h
EOR x2 @> 64-27, h EOR x2 @> 64-27, h, h
MADD h, prime4, prime1, h MADD h, prime4, prime1, h
try8: try8:
TBZ $3, len, try4 TBZ $3, n, try4
MOVD.P 8(p), x1 MOVD.P 8(p), x1
round0(x1) round0(x1)
ROR $64-27, h ROR $64-27, h
EOR x1 @> 64-27, h EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h MADD h, prime4, prime1, h
try4: try4:
TBZ $2, len, try2 TBZ $2, n, try2
MOVWU.P 4(p), x2 MOVWU.P 4(p), x2
MUL prime1, x2 MUL prime1, x2
ROR $64-23, h ROR $64-23, h
EOR x2 @> 64-23, h EOR x2 @> 64-23, h, h
MADD h, prime3, prime2, h MADD h, prime3, prime2, h
try2: try2:
TBZ $1, len, try1 TBZ $1, n, try1
MOVHU.P 2(p), x3 MOVHU.P 2(p), x3
AND $255, x3, x1 AND $255, x3, x1
LSR $8, x3, x2 LSR $8, x3, x2
MUL prime5, x1 MUL prime5, x1
ROR $64-11, h ROR $64-11, h
EOR x1 @> 64-11, h EOR x1 @> 64-11, h, h
MUL prime1, h MUL prime1, h
MUL prime5, x2 MUL prime5, x2
ROR $64-11, h ROR $64-11, h
EOR x2 @> 64-11, h EOR x2 @> 64-11, h, h
MUL prime1, h MUL prime1, h
try1: try1:
TBZ $0, len, end TBZ $0, n, finalize
MOVBU (p), x4 MOVBU (p), x4
MUL prime5, x4 MUL prime5, x4
ROR $64-11, h ROR $64-11, h
EOR x4 @> 64-11, h EOR x4 @> 64-11, h, h
MUL prime1, h MUL prime1, h
end: finalize:
EOR h >> 33, h EOR h >> 33, h
MUL prime2, h MUL prime2, h
EOR h >> 29, h EOR h >> 29, h
@ -163,24 +163,22 @@ end:
RET RET
// func writeBlocks(d *Digest, b []byte) int // func writeBlocks(d *Digest, b []byte) int
// TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Assumes len(b) >= 32. LDP ·primes+0(SB), (prime1, prime2)
TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
LDP primes<>(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously. // Load state. Assume v[1-4] are stored contiguously.
MOVD d+0(FP), digest MOVD d+0(FP), digest
LDP 0(digest), (v1, v2) LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4) LDP 16(digest), (v3, v4)
LDP b_base+8(FP), (p, len) LDP b_base+8(FP), (p, n)
blocksLoop() blockLoop()
// Store updated state. // Store updated state.
STP (v1, v2), 0(digest) STP (v1, v2), 0(digest)
STP (v3, v4), 16(digest) STP (v3, v4), 16(digest)
BIC $31, len BIC $31, n
MOVD len, ret+32(FP) MOVD n, ret+32(FP)
RET RET

View File

@ -13,4 +13,4 @@ package xxhash
func Sum64(b []byte) uint64 func Sum64(b []byte) uint64
//go:noescape //go:noescape
func writeBlocks(d *Digest, b []byte) int func writeBlocks(s *Digest, b []byte) int

View File

@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64 var h uint64
if n >= 32 { if n >= 32 {
v1 := prime1v + prime2 v1 := primes[0] + prime2
v2 := prime2 v2 := prime2
v3 := uint64(0) v3 := uint64(0)
v4 := -prime1v v4 := -primes[0]
for len(b) >= 32 { for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)])) v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)])) v2 = round(v2, u64(b[8:16:len(b)]))
@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n) h += uint64(n)
i, end := 0, len(b) for ; len(b) >= 8; b = b[8:] {
for ; i+8 <= end; i += 8 { k1 := round(0, u64(b[:8]))
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1 h ^= k1
h = rol27(h)*prime1 + prime4 h = rol27(h)*prime1 + prime4
} }
if i+4 <= end { if len(b) >= 4 {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3 h = rol23(h)*prime2 + prime3
i += 4 b = b[4:]
} }
for ; i < end; i++ { for ; len(b) > 0; b = b[1:] {
h ^= uint64(b[i]) * prime5 h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1 h = rol11(h) * prime1
} }

View File

@ -320,10 +320,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP) MOVQ $0x00000004, ret+24(FP)
RET RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV // Requires: CMOV
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
@ -617,10 +613,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP) MOVQ $0x00000004, ret+24(FP)
RET RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV // Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
@ -897,10 +889,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP) MOVQ $0x00000004, ret+24(FP)
RET RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV // Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
@ -1152,10 +1140,6 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP) MOVQ $0x00000004, ret+24(FP)
RET RET
// Return with not enough output space error
MOVQ $0x00000005, ret+24(FP)
RET
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Requires: SSE // Requires: SSE
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
@ -1389,8 +1373,7 @@ loop_finished:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVQ DI, 104(AX) MOVQ DI, 104(AX)
MOVQ 80(AX), CX SUBQ 80(AX), SI
SUBQ CX, SI
MOVQ SI, 112(AX) MOVQ SI, 112(AX)
RET RET
@ -1402,8 +1385,7 @@ error_match_off_too_big:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVQ DI, 104(AX) MOVQ DI, 104(AX)
MOVQ 80(AX), CX SUBQ 80(AX), SI
SUBQ CX, SI
MOVQ SI, 112(AX) MOVQ SI, 112(AX)
RET RET
@ -1747,8 +1729,7 @@ loop_finished:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVQ DI, 104(AX) MOVQ DI, 104(AX)
MOVQ 80(AX), CX SUBQ 80(AX), SI
SUBQ CX, SI
MOVQ SI, 112(AX) MOVQ SI, 112(AX)
RET RET
@ -1760,8 +1741,7 @@ error_match_off_too_big:
MOVQ ctx+0(FP), AX MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVQ DI, 104(AX) MOVQ DI, 104(AX)
MOVQ 80(AX), CX SUBQ 80(AX), SI
SUBQ CX, SI
MOVQ SI, 112(AX) MOVQ SI, 112(AX)
RET RET

View File

@ -36,9 +36,6 @@ const forcePreDef = false
// zstdMinMatch is the minimum zstd match length. // zstdMinMatch is the minimum zstd match length.
const zstdMinMatch = 3 const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
// fcsUnknown is used for unknown frame content size. // fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64 const fcsUnknown = math.MaxUint64
@ -75,7 +72,6 @@ var (
ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
// ErrUnknownDictionary is returned if the dictionary ID is unknown. // ErrUnknownDictionary is returned if the dictionary ID is unknown.
// For the time being dictionaries are not supported.
ErrUnknownDictionary = errors.New("unknown dictionary") ErrUnknownDictionary = errors.New("unknown dictionary")
// ErrFrameSizeExceeded is returned if the stated frame size is exceeded. // ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
@ -110,26 +106,25 @@ func printf(format string, a ...interface{}) {
} }
} }
// matchLen returns the maximum length. // matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two. // a must be the shortest of the two.
// The function also returns whether all bytes matched. func matchLen(a, b []byte) (n int) {
func matchLen(a, b []byte) int { for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
b = b[:len(a)] diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
for i := 0; i < len(a)-7; i += 8 { if diff != 0 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 { return n + bits.TrailingZeros64(diff)>>3
return i + (bits.TrailingZeros64(diff) >> 3)
} }
n += 8
} }
checked := (len(a) >> 3) << 3
a = a[checked:]
b = b[checked:]
for i := range a { for i := range a {
if a[i] != b[i] { if a[i] != b[i] {
return i + checked break
} }
n++
} }
return len(a) + checked return n
} }
func load3232(b []byte, i int32) uint32 { func load3232(b []byte, i int32) uint32 {
@ -140,10 +135,6 @@ func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:]) return binary.LittleEndian.Uint64(b[i:])
} }
func load64(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
type byter interface { type byter interface {
Bytes() []byte Bytes() []byte
Len() int Len() int
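The rewritten matchLen above relies on the fact that, for little-endian 8-byte loads, the index of the first mismatching byte equals the trailing zero count of the XOR divided by eight; a small self-contained sketch of that trick (function name and sample inputs are illustrative only):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// prefixLen8 returns how many of the first 8 bytes of a and b match.
// Both slices must be at least 8 bytes long.
func prefixLen8(a, b []byte) int {
	diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	if diff == 0 {
		return 8
	}
	// Byte i of the slice occupies bits [8*i, 8*i+8) of the loaded word,
	// so the index of the first differing byte is TrailingZeros64(diff)/8.
	return bits.TrailingZeros64(diff) >> 3
}

func main() {
	fmt.Println(prefixLen8([]byte("abcdefgh"), []byte("abcXefgh"))) // 3
	fmt.Println(prefixLen8([]byte("abcdefgh"), []byte("abcdefgh"))) // 8
}
```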

View File

@ -21,8 +21,8 @@ import "bytes"
// Fuzz text metric parser with github.com/dvyukov/go-fuzz: // Fuzz text metric parser with github.com/dvyukov/go-fuzz:
// //
// go-fuzz-build github.com/prometheus/common/expfmt // go-fuzz-build github.com/prometheus/common/expfmt
// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
// //
// Further input samples should go in the folder fuzz/corpus. // Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int { func Fuzz(in []byte) int {

View File

@ -46,20 +46,20 @@ import (
// missing features and peculiarities to avoid complications when switching from // missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa: // Prometheus to OpenMetrics or vice versa:
// //
// - Counters are expected to have the `_total` suffix in their metric name. In // - Counters are expected to have the `_total` suffix in their metric name. In
// the output, the suffix will be truncated from the `# TYPE` and `# HELP` // the output, the suffix will be truncated from the `# TYPE` and `# HELP`
// line. A counter with a missing `_total` suffix is not an error. However, // line. A counter with a missing `_total` suffix is not an error. However,
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics // its type will be set to `unknown` in that case to avoid invalid OpenMetrics
// output. // output.
// //
// - No support for the following (optional) features: `# UNIT` line, `_created` // - No support for the following (optional) features: `# UNIT` line, `_created`
// line, info type, stateset type, gaugehistogram type. // line, info type, stateset type, gaugehistogram type.
// //
// - The size of exemplar labels is not checked (i.e. it's possible to create // - The size of exemplar labels is not checked (i.e. it's possible to create
// exemplars that are larger than allowed by the OpenMetrics specification). // exemplars that are larger than allowed by the OpenMetrics specification).
// //
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters // - The value of Counters is not checked. (OpenMetrics doesn't allow counters
// with a `NaN` value.) // with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
name := in.GetName() name := in.GetName()
if name == "" { if name == "" {
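For orientation, MetricFamilyToOpenMetrics is typically fed the *dto.MetricFamily values produced by a registry's Gather call; a minimal sketch (it assumes github.com/prometheus/client_golang is available for building the input, which is not part of this package):

```go
package main

import (
	"log"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Hypothetical counter; note the _total suffix expected by OpenMetrics.
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_events_total",
		Help: "Demo counter.",
	})
	reg := prometheus.NewRegistry()
	reg.MustRegister(c)
	c.Add(3)

	mfs, err := reg.Gather()
	if err != nil {
		log.Fatal(err)
	}
	for _, mf := range mfs {
		if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
			log.Fatal(err)
		}
	}
	// Terminate the exposition with the required "# EOF" line.
	if _, err := expfmt.FinalizeOpenMetrics(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```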

View File

@ -17,7 +17,6 @@ import (
"bufio" "bufio"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math" "math"
"strconv" "strconv"
"strings" "strings"
@ -44,7 +43,7 @@ const (
var ( var (
bufPool = sync.Pool{ bufPool = sync.Pool{
New: func() interface{} { New: func() interface{} {
return bufio.NewWriter(ioutil.Discard) return bufio.NewWriter(io.Discard)
}, },
} }
numBufPool = sync.Pool{ numBufPool = sync.Pool{

View File

@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are modification, are permitted provided that the following conditions are
met: met:
Redistributions of source code must retain the above copyright Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer. notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the the documentation and/or other materials provided with the
distribution. distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote names of its contributors may be used to endorse or promote
products derived from this software without specific prior written products derived from this software without specific prior written
permission. permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
package goautoneg package goautoneg

View File

@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),)
endif endif
endif endif
PROMU_VERSION ?= 0.13.0 PROMU_VERSION ?= 0.14.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT := GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.45.2 GOLANGCI_LINT_VERSION ?= v1.49.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different. # windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
# If we're in CI and there is an Actions file, that means the linter # If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here. # is being run in Actions, so we don't need to run it here.
ifeq (,$(CIRCLE_JOB)) ifneq (,$(SKIP_GOLANGCI_LINT))
GOLANGCI_LINT :=
else ifeq (,$(CIRCLE_JOB))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint

View File

@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
return cpuinfo, nil return cpuinfo, nil
} }
func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
// find the first "system type" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
systemType := field[1]
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
cpuinfo[i].VendorID = systemType
case "CPU Family":
cpuinfo[i].CPUFamily = field[1]
case "Model Name":
cpuinfo[i].ModelName = field[1]
}
}
return cpuinfo, nil
}
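A hedged usage sketch for the new LoongArch parser: on a loong64 system the parsed entries surface through the existing FS.CPUInfo accessor, with the "system type" value carried in VendorID, as read from the code above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // backed by /proc
	if err != nil {
		log.Fatal(err)
	}
	cpus, err := fs.CPUInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cpus {
		// On loong64, VendorID holds the "system type" line and ModelName the
		// "Model Name" line, per parseCPUInfoLoong above.
		fmt.Printf("cpu %d: vendor=%q model=%q\n", c.Processor, c.VendorID, c.ModelName)
	}
}
```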
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info)) scanner := bufio.NewScanner(bytes.NewReader(info))

19
vendor/github.com/prometheus/procfs/cpuinfo_loong64.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
// +build linux
package procfs
var parseCPUInfo = parseCPUInfoLoong

View File

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x //go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x // +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs package procfs

View File

@ -16,30 +16,29 @@
// //
// Example: // Example:
// //
// package main // package main
// //
// import ( // import (
// "fmt" // "fmt"
// "log" // "log"
// //
// "github.com/prometheus/procfs" // "github.com/prometheus/procfs"
// ) // )
// //
// func main() { // func main() {
// p, err := procfs.Self() // p, err := procfs.Self()
// if err != nil { // if err != nil {
// log.Fatalf("could not get process: %s", err) // log.Fatalf("could not get process: %s", err)
// } // }
// //
// stat, err := p.Stat() // stat, err := p.Stat()
// if err != nil { // if err != nil {
// log.Fatalf("could not get process stat: %s", err) // log.Fatalf("could not get process stat: %s", err)
// } // }
//
// fmt.Printf("command: %s\n", stat.Comm)
// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
// }
// //
// fmt.Printf("command: %s\n", stat.Comm)
// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
// }
package procfs package procfs

View File

@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
} }
// parseMount parses an entry in /proc/[pid]/mountstats in the format: // parseMount parses an entry in /proc/[pid]/mountstats in the format:
// device [device] mounted on [mount] with fstype [type] //
// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) { func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen { if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss) return nil, fmt.Errorf("invalid device entry: %v", ss)
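To make the documented device-line format concrete, here is a sketch of how such a line splits into the positions parseMount inspects (the sample line is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical /proc/[pid]/mountstats entry in the documented shape:
	//   device [device] mounted on [mount] with fstype [type]
	line := "device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1"
	ss := strings.Fields(line)
	fmt.Println(ss[1]) // device: 192.168.1.1:/srv/test
	fmt.Println(ss[4]) // mount:  /mnt/nfs/test
	fmt.Println(ss[7]) // fstype: nfs4
}
```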

View File

@ -27,8 +27,9 @@ import (
// For the proc file format details, // For the proc file format details,
// See: // See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 // * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. // * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
// SoftnetStat contains a single row of data from /proc/net/softnet_stat. // SoftnetStat contains a single row of data from /proc/net/softnet_stat.
type SoftnetStat struct { type SoftnetStat struct {
@ -38,6 +39,18 @@ type SoftnetStat struct {
Dropped uint32 Dropped uint32
// Number of times processing packets ran out of quota. // Number of times processing packets ran out of quota.
TimeSqueezed uint32 TimeSqueezed uint32
// Number of collisions that occurred while obtaining the device lock while transmitting.
CPUCollision uint32
// Number of times the CPU was woken up to process packets (received_rps).
ReceivedRps uint32
// Number of times the flow limit has been reached.
FlowLimitCount uint32
// Softnet backlog status.
SoftnetBacklogLen uint32
// CPU ID owning this softnet_data.
Index uint32
// Number of columns parsed from this softnet_data row (Width).
Width int
} }
var softNetProcFile = "net/softnet_stat" var softNetProcFile = "net/softnet_stat"
@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
for s.Scan() { for s.Scan() {
columns := strings.Fields(s.Text()) columns := strings.Fields(s.Text())
width := len(columns) width := len(columns)
softnetStat := SoftnetStat{}
if width < minColumns { if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
} }
// We only parse the first three columns at the moment. // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
us, err := parseHexUint32s(columns[0:3]) if width >= minColumns {
if err != nil { us, err := parseHexUint32s(columns[0:9])
return nil, err if err != nil {
return nil, err
}
softnetStat.Processed = us[0]
softnetStat.Dropped = us[1]
softnetStat.TimeSqueezed = us[2]
softnetStat.CPUCollision = us[8]
} }
stats = append(stats, SoftnetStat{ // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
Processed: us[0], if width >= 10 {
Dropped: us[1], us, err := parseHexUint32s(columns[9:10])
TimeSqueezed: us[2], if err != nil {
}) return nil, err
}
softnetStat.ReceivedRps = us[0]
}
// Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
if width >= 11 {
us, err := parseHexUint32s(columns[10:11])
if err != nil {
return nil, err
}
softnetStat.FlowLimitCount = us[0]
}
// Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
if width >= 13 {
us, err := parseHexUint32s(columns[11:13])
if err != nil {
return nil, err
}
softnetStat.SoftnetBacklogLen = us[0]
softnetStat.Index = us[1]
}
softnetStat.Width = width
stats = append(stats, softnetStat)
} }
return stats, nil return stats, nil
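A short usage sketch for the extended softnet parsing: the new fields are only meaningful when the row is wide enough, which is what the Width field records (API names as defined in this package; the printout is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	for i, s := range stats {
		fmt.Printf("cpu %d: processed=%d dropped=%d squeezed=%d\n",
			i, s.Processed, s.Dropped, s.TimeSqueezed)
		// Backlog length and owning CPU index are only exported by kernels
		// >= 5.14, i.e. when at least 13 columns were present.
		if s.Width >= 13 {
			fmt.Printf("  backlog=%d cpu=%d\n", s.SoftnetBacklogLen, s.Index)
		}
	}
}
```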

View File

@ -15,6 +15,7 @@ package procfs
import ( import (
"bufio" "bufio"
"io"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) {
return nil, err return nil, err
} }
netStatFile := NetStat{ procNetstat, err := parseNetstat(file)
Filename: filepath.Base(filePath), if err != nil {
Stats: make(map[string][]uint64), return nil, err
} }
scanner := bufio.NewScanner(file) procNetstat.Filename = filepath.Base(filePath)
scanner.Scan()
// First string is always a header for stats
var headers []string
headers = append(headers, strings.Fields(scanner.Text())...)
// Other strings represent per-CPU counters netStatsTotal = append(netStatsTotal, procNetstat)
for scanner.Scan() {
for num, counter := range strings.Fields(scanner.Text()) {
value, err := strconv.ParseUint(counter, 16, 64)
if err != nil {
return nil, err
}
netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
}
}
netStatsTotal = append(netStatsTotal, netStatFile)
} }
return netStatsTotal, nil return netStatsTotal, nil
} }
// parseNetstat parses the metrics from `/proc/net/stat/` file
// and returns a NetStat structure.
func parseNetstat(r io.Reader) (NetStat, error) {
var (
scanner = bufio.NewScanner(r)
netStat = NetStat{
Stats: make(map[string][]uint64),
}
)
scanner.Scan()
// First string is always a header for stats
var headers []string
headers = append(headers, strings.Fields(scanner.Text())...)
// Other strings represent per-CPU counters
for scanner.Scan() {
for num, counter := range strings.Fields(scanner.Text()) {
value, err := strconv.ParseUint(counter, 16, 64)
if err != nil {
return NetStat{}, err
}
netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
}
}
return netStat, nil
}
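For completeness, a sketch of consuming the refactored NetStat API, which returns one entry per file under /proc/net/stat with per-CPU counters keyed by header name (names as in the code above; output handling is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	netStats, err := fs.NetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, ns := range netStats {
		// ns.Filename is the stat file name (e.g. "arp_cache"); each header
		// maps to one value per CPU.
		for header, perCPU := range ns.Stats {
			fmt.Printf("%s/%s: %v\n", ns.Filename, header, perCPU)
		}
	}
}
```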

View File

@ -23,7 +23,7 @@ import (
"github.com/prometheus/procfs/internal/util" "github.com/prometheus/procfs/internal/util"
) )
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource // specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in

98
vendor/github.com/prometheus/procfs/proc_interrupts.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Interrupt represents a single interrupt line.
type Interrupt struct {
// Info is the type of interrupt.
Info string
// Devices is the name of the device that is located at that IRQ.
Devices string
// Values is the number of interrupts per CPU.
Values []string
}
// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
type Interrupts map[string]Interrupt
// Interrupts creates a new instance from a given Proc instance.
func (p Proc) Interrupts() (Interrupts, error) {
data, err := util.ReadFileNoStat(p.path("interrupts"))
if err != nil {
return nil, err
}
return parseInterrupts(bytes.NewReader(data))
}
func parseInterrupts(r io.Reader) (Interrupts, error) {
var (
interrupts = Interrupts{}
scanner = bufio.NewScanner(r)
)
if !scanner.Scan() {
return nil, errors.New("interrupts empty")
}
cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
for scanner.Scan() {
parts := strings.Fields(scanner.Text())
if len(parts) == 0 { // skip empty lines
continue
}
if len(parts) < 2 {
return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts)
}
intName := parts[0][:len(parts[0])-1] // remove trailing :
if len(parts) == 2 {
interrupts[intName] = Interrupt{
Info: "",
Devices: "",
Values: []string{
parts[1],
},
}
continue
}
intr := Interrupt{
Values: parts[1 : cpuNum+1],
}
if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt
intr.Info = parts[cpuNum+1]
intr.Devices = strings.Join(parts[cpuNum+2:], " ")
} else {
intr.Info = strings.Join(parts[cpuNum+1:], " ")
}
interrupts[intName] = intr
}
return interrupts, scanner.Err()
}
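A minimal sketch of reading the new Interrupts map for the current process (API as defined in the file above; the printed fields are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	interrupts, err := p.Interrupts()
	if err != nil {
		log.Fatal(err)
	}
	for irq, info := range interrupts {
		// Values holds one counter per CPU column of /proc/interrupts.
		fmt.Printf("%s (%s, %s): %v\n", irq, info.Info, info.Devices, info.Values)
	}
}
```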

View File

@ -33,139 +33,140 @@ type ProcNetstat struct {
} }
type TcpExt struct { // nolint:revive type TcpExt struct { // nolint:revive
SyncookiesSent float64 SyncookiesSent *float64
SyncookiesRecv float64 SyncookiesRecv *float64
SyncookiesFailed float64 SyncookiesFailed *float64
EmbryonicRsts float64 EmbryonicRsts *float64
PruneCalled float64 PruneCalled *float64
RcvPruned float64 RcvPruned *float64
OfoPruned float64 OfoPruned *float64
OutOfWindowIcmps float64 OutOfWindowIcmps *float64
LockDroppedIcmps float64 LockDroppedIcmps *float64
ArpFilter float64 ArpFilter *float64
TW float64 TW *float64
TWRecycled float64 TWRecycled *float64
TWKilled float64 TWKilled *float64
PAWSActive float64 PAWSActive *float64
PAWSEstab float64 PAWSEstab *float64
DelayedACKs float64 DelayedACKs *float64
DelayedACKLocked float64 DelayedACKLocked *float64
DelayedACKLost float64 DelayedACKLost *float64
ListenOverflows float64 ListenOverflows *float64
ListenDrops float64 ListenDrops *float64
TCPHPHits float64 TCPHPHits *float64
TCPPureAcks float64 TCPPureAcks *float64
TCPHPAcks float64 TCPHPAcks *float64
TCPRenoRecovery float64 TCPRenoRecovery *float64
TCPSackRecovery float64 TCPSackRecovery *float64
TCPSACKReneging float64 TCPSACKReneging *float64
TCPSACKReorder float64 TCPSACKReorder *float64
TCPRenoReorder float64 TCPRenoReorder *float64
TCPTSReorder float64 TCPTSReorder *float64
TCPFullUndo float64 TCPFullUndo *float64
TCPPartialUndo float64 TCPPartialUndo *float64
TCPDSACKUndo float64 TCPDSACKUndo *float64
TCPLossUndo float64 TCPLossUndo *float64
TCPLostRetransmit float64 TCPLostRetransmit *float64
TCPRenoFailures float64 TCPRenoFailures *float64
TCPSackFailures float64 TCPSackFailures *float64
TCPLossFailures float64 TCPLossFailures *float64
TCPFastRetrans float64 TCPFastRetrans *float64
TCPSlowStartRetrans float64 TCPSlowStartRetrans *float64
TCPTimeouts float64 TCPTimeouts *float64
TCPLossProbes float64 TCPLossProbes *float64
TCPLossProbeRecovery float64 TCPLossProbeRecovery *float64
TCPRenoRecoveryFail float64 TCPRenoRecoveryFail *float64
TCPSackRecoveryFail float64 TCPSackRecoveryFail *float64
TCPRcvCollapsed float64 TCPRcvCollapsed *float64
TCPDSACKOldSent float64 TCPDSACKOldSent *float64
TCPDSACKOfoSent float64 TCPDSACKOfoSent *float64
TCPDSACKRecv float64 TCPDSACKRecv *float64
TCPDSACKOfoRecv float64 TCPDSACKOfoRecv *float64
TCPAbortOnData float64 TCPAbortOnData *float64
TCPAbortOnClose float64 TCPAbortOnClose *float64
TCPAbortOnMemory float64 TCPAbortOnMemory *float64
TCPAbortOnTimeout float64 TCPAbortOnTimeout *float64
TCPAbortOnLinger float64 TCPAbortOnLinger *float64
TCPAbortFailed float64 TCPAbortFailed *float64
TCPMemoryPressures float64 TCPMemoryPressures *float64
TCPMemoryPressuresChrono float64 TCPMemoryPressuresChrono *float64
TCPSACKDiscard float64 TCPSACKDiscard *float64
TCPDSACKIgnoredOld float64 TCPDSACKIgnoredOld *float64
TCPDSACKIgnoredNoUndo float64 TCPDSACKIgnoredNoUndo *float64
TCPSpuriousRTOs float64 TCPSpuriousRTOs *float64
TCPMD5NotFound float64 TCPMD5NotFound *float64
TCPMD5Unexpected float64 TCPMD5Unexpected *float64
TCPMD5Failure float64 TCPMD5Failure *float64
TCPSackShifted float64 TCPSackShifted *float64
TCPSackMerged float64 TCPSackMerged *float64
TCPSackShiftFallback float64 TCPSackShiftFallback *float64
TCPBacklogDrop float64 TCPBacklogDrop *float64
PFMemallocDrop float64 PFMemallocDrop *float64
TCPMinTTLDrop float64 TCPMinTTLDrop *float64
TCPDeferAcceptDrop float64 TCPDeferAcceptDrop *float64
IPReversePathFilter float64 IPReversePathFilter *float64
TCPTimeWaitOverflow float64 TCPTimeWaitOverflow *float64
TCPReqQFullDoCookies float64 TCPReqQFullDoCookies *float64
TCPReqQFullDrop float64 TCPReqQFullDrop *float64
TCPRetransFail float64 TCPRetransFail *float64
TCPRcvCoalesce float64 TCPRcvCoalesce *float64
TCPOFOQueue float64 TCPRcvQDrop *float64
TCPOFODrop float64 TCPOFOQueue *float64
TCPOFOMerge float64 TCPOFODrop *float64
TCPChallengeACK float64 TCPOFOMerge *float64
TCPSYNChallenge float64 TCPChallengeACK *float64
TCPFastOpenActive float64 TCPSYNChallenge *float64
TCPFastOpenActiveFail float64 TCPFastOpenActive *float64
TCPFastOpenPassive float64 TCPFastOpenActiveFail *float64
TCPFastOpenPassiveFail float64 TCPFastOpenPassive *float64
TCPFastOpenListenOverflow float64 TCPFastOpenPassiveFail *float64
TCPFastOpenCookieReqd float64 TCPFastOpenListenOverflow *float64
TCPFastOpenBlackhole float64 TCPFastOpenCookieReqd *float64
TCPSpuriousRtxHostQueues float64 TCPFastOpenBlackhole *float64
BusyPollRxPackets float64 TCPSpuriousRtxHostQueues *float64
TCPAutoCorking float64 BusyPollRxPackets *float64
TCPFromZeroWindowAdv float64 TCPAutoCorking *float64
TCPToZeroWindowAdv float64 TCPFromZeroWindowAdv *float64
TCPWantZeroWindowAdv float64 TCPToZeroWindowAdv *float64
TCPSynRetrans float64 TCPWantZeroWindowAdv *float64
TCPOrigDataSent float64 TCPSynRetrans *float64
TCPHystartTrainDetect float64 TCPOrigDataSent *float64
TCPHystartTrainCwnd float64 TCPHystartTrainDetect *float64
TCPHystartDelayDetect float64 TCPHystartTrainCwnd *float64
TCPHystartDelayCwnd float64 TCPHystartDelayDetect *float64
TCPACKSkippedSynRecv float64 TCPHystartDelayCwnd *float64
TCPACKSkippedPAWS float64 TCPACKSkippedSynRecv *float64
TCPACKSkippedSeq float64 TCPACKSkippedPAWS *float64
TCPACKSkippedFinWait2 float64 TCPACKSkippedSeq *float64
TCPACKSkippedTimeWait float64 TCPACKSkippedFinWait2 *float64
TCPACKSkippedChallenge float64 TCPACKSkippedTimeWait *float64
TCPWinProbe float64 TCPACKSkippedChallenge *float64
TCPKeepAlive float64 TCPWinProbe *float64
TCPMTUPFail float64 TCPKeepAlive *float64
TCPMTUPSuccess float64 TCPMTUPFail *float64
TCPWqueueTooBig float64 TCPMTUPSuccess *float64
TCPWqueueTooBig *float64
} }
type IpExt struct { // nolint:revive type IpExt struct { // nolint:revive
InNoRoutes float64 InNoRoutes *float64
InTruncatedPkts float64 InTruncatedPkts *float64
InMcastPkts float64 InMcastPkts *float64
OutMcastPkts float64 OutMcastPkts *float64
InBcastPkts float64 InBcastPkts *float64
OutBcastPkts float64 OutBcastPkts *float64
InOctets float64 InOctets *float64
OutOctets float64 OutOctets *float64
InMcastOctets float64 InMcastOctets *float64
OutMcastOctets float64 OutMcastOctets *float64
InBcastOctets float64 InBcastOctets *float64
OutBcastOctets float64 OutBcastOctets *float64
InCsumErrors float64 InCsumErrors *float64
InNoECTPkts float64 InNoECTPkts *float64
InECT1Pkts float64 InECT1Pkts *float64
InECT0Pkts float64 InECT0Pkts *float64
InCEPkts float64 InCEPkts *float64
ReasmOverlaps float64 ReasmOverlaps *float64
} }
func (p Proc) Netstat() (ProcNetstat, error) { func (p Proc) Netstat() (ProcNetstat, error) {
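Because every TcpExt and IpExt counter is now a *float64, callers must nil-check fields that may be absent on older kernels; a hedged sketch of the new access pattern (the field choice is arbitrary):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	ns, err := p.Netstat()
	if err != nil {
		log.Fatal(err)
	}
	// TCPRcvQDrop is one of the newly optional counters; it is nil when the
	// running kernel does not export it.
	if v := ns.TcpExt.TCPRcvQDrop; v != nil {
		fmt.Printf("TcpExt.TCPRcvQDrop = %.0f\n", *v)
	} else {
		fmt.Println("TcpExt.TCPRcvQDrop not reported by this kernel")
	}
}
```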
@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) {
if err != nil { if err != nil {
return ProcNetstat{PID: p.PID}, err return ProcNetstat{PID: p.PID}, err
} }
procNetstat, err := parseNetstat(bytes.NewReader(data), filename) procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
procNetstat.PID = p.PID procNetstat.PID = p.PID
return procNetstat, err return procNetstat, err
} }
// parseNetstat parses the metrics from proc/<pid>/net/netstat file // parseProcNetstat parses the metrics from proc/<pid>/net/netstat file
// and returns a ProcNetstat structure. // and returns a ProcNetstat structure.
func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
var ( var (
scanner = bufio.NewScanner(r) scanner = bufio.NewScanner(r)
procNetstat = ProcNetstat{} procNetstat = ProcNetstat{}
@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt": case "TcpExt":
switch key { switch key {
case "SyncookiesSent": case "SyncookiesSent":
procNetstat.TcpExt.SyncookiesSent = value procNetstat.TcpExt.SyncookiesSent = &value
case "SyncookiesRecv": case "SyncookiesRecv":
procNetstat.TcpExt.SyncookiesRecv = value procNetstat.TcpExt.SyncookiesRecv = &value
case "SyncookiesFailed": case "SyncookiesFailed":
procNetstat.TcpExt.SyncookiesFailed = value procNetstat.TcpExt.SyncookiesFailed = &value
case "EmbryonicRsts": case "EmbryonicRsts":
procNetstat.TcpExt.EmbryonicRsts = value procNetstat.TcpExt.EmbryonicRsts = &value
case "PruneCalled": case "PruneCalled":
procNetstat.TcpExt.PruneCalled = value procNetstat.TcpExt.PruneCalled = &value
case "RcvPruned": case "RcvPruned":
procNetstat.TcpExt.RcvPruned = value procNetstat.TcpExt.RcvPruned = &value
case "OfoPruned": case "OfoPruned":
procNetstat.TcpExt.OfoPruned = value procNetstat.TcpExt.OfoPruned = &value
case "OutOfWindowIcmps": case "OutOfWindowIcmps":
procNetstat.TcpExt.OutOfWindowIcmps = value procNetstat.TcpExt.OutOfWindowIcmps = &value
case "LockDroppedIcmps": case "LockDroppedIcmps":
procNetstat.TcpExt.LockDroppedIcmps = value procNetstat.TcpExt.LockDroppedIcmps = &value
case "ArpFilter": case "ArpFilter":
procNetstat.TcpExt.ArpFilter = value procNetstat.TcpExt.ArpFilter = &value
case "TW": case "TW":
procNetstat.TcpExt.TW = value procNetstat.TcpExt.TW = &value
case "TWRecycled": case "TWRecycled":
procNetstat.TcpExt.TWRecycled = value procNetstat.TcpExt.TWRecycled = &value
case "TWKilled": case "TWKilled":
procNetstat.TcpExt.TWKilled = value procNetstat.TcpExt.TWKilled = &value
case "PAWSActive": case "PAWSActive":
procNetstat.TcpExt.PAWSActive = value procNetstat.TcpExt.PAWSActive = &value
case "PAWSEstab": case "PAWSEstab":
procNetstat.TcpExt.PAWSEstab = value procNetstat.TcpExt.PAWSEstab = &value
case "DelayedACKs": case "DelayedACKs":
procNetstat.TcpExt.DelayedACKs = value procNetstat.TcpExt.DelayedACKs = &value
case "DelayedACKLocked": case "DelayedACKLocked":
procNetstat.TcpExt.DelayedACKLocked = value procNetstat.TcpExt.DelayedACKLocked = &value
case "DelayedACKLost": case "DelayedACKLost":
procNetstat.TcpExt.DelayedACKLost = value procNetstat.TcpExt.DelayedACKLost = &value
case "ListenOverflows": case "ListenOverflows":
procNetstat.TcpExt.ListenOverflows = value procNetstat.TcpExt.ListenOverflows = &value
case "ListenDrops": case "ListenDrops":
procNetstat.TcpExt.ListenDrops = value procNetstat.TcpExt.ListenDrops = &value
case "TCPHPHits": case "TCPHPHits":
procNetstat.TcpExt.TCPHPHits = value procNetstat.TcpExt.TCPHPHits = &value
case "TCPPureAcks": case "TCPPureAcks":
procNetstat.TcpExt.TCPPureAcks = value procNetstat.TcpExt.TCPPureAcks = &value
case "TCPHPAcks": case "TCPHPAcks":
procNetstat.TcpExt.TCPHPAcks = value procNetstat.TcpExt.TCPHPAcks = &value
case "TCPRenoRecovery": case "TCPRenoRecovery":
procNetstat.TcpExt.TCPRenoRecovery = value procNetstat.TcpExt.TCPRenoRecovery = &value
case "TCPSackRecovery": case "TCPSackRecovery":
procNetstat.TcpExt.TCPSackRecovery = value procNetstat.TcpExt.TCPSackRecovery = &value
case "TCPSACKReneging": case "TCPSACKReneging":
procNetstat.TcpExt.TCPSACKReneging = value procNetstat.TcpExt.TCPSACKReneging = &value
case "TCPSACKReorder": case "TCPSACKReorder":
procNetstat.TcpExt.TCPSACKReorder = value procNetstat.TcpExt.TCPSACKReorder = &value
case "TCPRenoReorder": case "TCPRenoReorder":
procNetstat.TcpExt.TCPRenoReorder = value procNetstat.TcpExt.TCPRenoReorder = &value
case "TCPTSReorder": case "TCPTSReorder":
procNetstat.TcpExt.TCPTSReorder = value procNetstat.TcpExt.TCPTSReorder = &value
case "TCPFullUndo": case "TCPFullUndo":
procNetstat.TcpExt.TCPFullUndo = value procNetstat.TcpExt.TCPFullUndo = &value
case "TCPPartialUndo": case "TCPPartialUndo":
procNetstat.TcpExt.TCPPartialUndo = value procNetstat.TcpExt.TCPPartialUndo = &value
case "TCPDSACKUndo": case "TCPDSACKUndo":
procNetstat.TcpExt.TCPDSACKUndo = value procNetstat.TcpExt.TCPDSACKUndo = &value
case "TCPLossUndo": case "TCPLossUndo":
procNetstat.TcpExt.TCPLossUndo = value procNetstat.TcpExt.TCPLossUndo = &value
case "TCPLostRetransmit": case "TCPLostRetransmit":
procNetstat.TcpExt.TCPLostRetransmit = value procNetstat.TcpExt.TCPLostRetransmit = &value
case "TCPRenoFailures": case "TCPRenoFailures":
procNetstat.TcpExt.TCPRenoFailures = value procNetstat.TcpExt.TCPRenoFailures = &value
case "TCPSackFailures": case "TCPSackFailures":
procNetstat.TcpExt.TCPSackFailures = value procNetstat.TcpExt.TCPSackFailures = &value
case "TCPLossFailures": case "TCPLossFailures":
procNetstat.TcpExt.TCPLossFailures = value procNetstat.TcpExt.TCPLossFailures = &value
case "TCPFastRetrans": case "TCPFastRetrans":
procNetstat.TcpExt.TCPFastRetrans = value procNetstat.TcpExt.TCPFastRetrans = &value
case "TCPSlowStartRetrans": case "TCPSlowStartRetrans":
procNetstat.TcpExt.TCPSlowStartRetrans = value procNetstat.TcpExt.TCPSlowStartRetrans = &value
case "TCPTimeouts": case "TCPTimeouts":
procNetstat.TcpExt.TCPTimeouts = value procNetstat.TcpExt.TCPTimeouts = &value
case "TCPLossProbes": case "TCPLossProbes":
procNetstat.TcpExt.TCPLossProbes = value procNetstat.TcpExt.TCPLossProbes = &value
case "TCPLossProbeRecovery": case "TCPLossProbeRecovery":
procNetstat.TcpExt.TCPLossProbeRecovery = value procNetstat.TcpExt.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail": case "TCPRenoRecoveryFail":
procNetstat.TcpExt.TCPRenoRecoveryFail = value procNetstat.TcpExt.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail": case "TCPSackRecoveryFail":
procNetstat.TcpExt.TCPSackRecoveryFail = value procNetstat.TcpExt.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed": case "TCPRcvCollapsed":
procNetstat.TcpExt.TCPRcvCollapsed = value procNetstat.TcpExt.TCPRcvCollapsed = &value
case "TCPDSACKOldSent": case "TCPDSACKOldSent":
procNetstat.TcpExt.TCPDSACKOldSent = value procNetstat.TcpExt.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent": case "TCPDSACKOfoSent":
procNetstat.TcpExt.TCPDSACKOfoSent = value procNetstat.TcpExt.TCPDSACKOfoSent = &value
case "TCPDSACKRecv": case "TCPDSACKRecv":
procNetstat.TcpExt.TCPDSACKRecv = value procNetstat.TcpExt.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv": case "TCPDSACKOfoRecv":
procNetstat.TcpExt.TCPDSACKOfoRecv = value procNetstat.TcpExt.TCPDSACKOfoRecv = &value
case "TCPAbortOnData": case "TCPAbortOnData":
procNetstat.TcpExt.TCPAbortOnData = value procNetstat.TcpExt.TCPAbortOnData = &value
case "TCPAbortOnClose": case "TCPAbortOnClose":
procNetstat.TcpExt.TCPAbortOnClose = value procNetstat.TcpExt.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop": case "TCPDeferAcceptDrop":
procNetstat.TcpExt.TCPDeferAcceptDrop = value procNetstat.TcpExt.TCPDeferAcceptDrop = &value
case "IPReversePathFilter": case "IPReversePathFilter":
procNetstat.TcpExt.IPReversePathFilter = value procNetstat.TcpExt.IPReversePathFilter = &value
case "TCPTimeWaitOverflow": case "TCPTimeWaitOverflow":
procNetstat.TcpExt.TCPTimeWaitOverflow = value procNetstat.TcpExt.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies": case "TCPReqQFullDoCookies":
procNetstat.TcpExt.TCPReqQFullDoCookies = value procNetstat.TcpExt.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop": case "TCPReqQFullDrop":
procNetstat.TcpExt.TCPReqQFullDrop = value procNetstat.TcpExt.TCPReqQFullDrop = &value
case "TCPRetransFail": case "TCPRetransFail":
procNetstat.TcpExt.TCPRetransFail = value procNetstat.TcpExt.TCPRetransFail = &value
case "TCPRcvCoalesce": case "TCPRcvCoalesce":
procNetstat.TcpExt.TCPRcvCoalesce = value procNetstat.TcpExt.TCPRcvCoalesce = &value
case "TCPRcvQDrop":
procNetstat.TcpExt.TCPRcvQDrop = &value
case "TCPOFOQueue": case "TCPOFOQueue":
procNetstat.TcpExt.TCPOFOQueue = value procNetstat.TcpExt.TCPOFOQueue = &value
case "TCPOFODrop": case "TCPOFODrop":
procNetstat.TcpExt.TCPOFODrop = value procNetstat.TcpExt.TCPOFODrop = &value
case "TCPOFOMerge": case "TCPOFOMerge":
procNetstat.TcpExt.TCPOFOMerge = value procNetstat.TcpExt.TCPOFOMerge = &value
case "TCPChallengeACK": case "TCPChallengeACK":
procNetstat.TcpExt.TCPChallengeACK = value procNetstat.TcpExt.TCPChallengeACK = &value
case "TCPSYNChallenge": case "TCPSYNChallenge":
procNetstat.TcpExt.TCPSYNChallenge = value procNetstat.TcpExt.TCPSYNChallenge = &value
case "TCPFastOpenActive": case "TCPFastOpenActive":
procNetstat.TcpExt.TCPFastOpenActive = value procNetstat.TcpExt.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail": case "TCPFastOpenActiveFail":
procNetstat.TcpExt.TCPFastOpenActiveFail = value procNetstat.TcpExt.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive": case "TCPFastOpenPassive":
procNetstat.TcpExt.TCPFastOpenPassive = value procNetstat.TcpExt.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail": case "TCPFastOpenPassiveFail":
procNetstat.TcpExt.TCPFastOpenPassiveFail = value procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow": case "TCPFastOpenListenOverflow":
procNetstat.TcpExt.TCPFastOpenListenOverflow = value procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd": case "TCPFastOpenCookieReqd":
procNetstat.TcpExt.TCPFastOpenCookieReqd = value procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole": case "TCPFastOpenBlackhole":
procNetstat.TcpExt.TCPFastOpenBlackhole = value procNetstat.TcpExt.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues": case "TCPSpuriousRtxHostQueues":
procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets": case "BusyPollRxPackets":
procNetstat.TcpExt.BusyPollRxPackets = value procNetstat.TcpExt.BusyPollRxPackets = &value
case "TCPAutoCorking": case "TCPAutoCorking":
procNetstat.TcpExt.TCPAutoCorking = value procNetstat.TcpExt.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv": case "TCPFromZeroWindowAdv":
procNetstat.TcpExt.TCPFromZeroWindowAdv = value procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv": case "TCPToZeroWindowAdv":
procNetstat.TcpExt.TCPToZeroWindowAdv = value procNetstat.TcpExt.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv": case "TCPWantZeroWindowAdv":
procNetstat.TcpExt.TCPWantZeroWindowAdv = value procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans": case "TCPSynRetrans":
procNetstat.TcpExt.TCPSynRetrans = value procNetstat.TcpExt.TCPSynRetrans = &value
case "TCPOrigDataSent": case "TCPOrigDataSent":
procNetstat.TcpExt.TCPOrigDataSent = value procNetstat.TcpExt.TCPOrigDataSent = &value
case "TCPHystartTrainDetect": case "TCPHystartTrainDetect":
procNetstat.TcpExt.TCPHystartTrainDetect = value procNetstat.TcpExt.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd": case "TCPHystartTrainCwnd":
procNetstat.TcpExt.TCPHystartTrainCwnd = value procNetstat.TcpExt.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect": case "TCPHystartDelayDetect":
procNetstat.TcpExt.TCPHystartDelayDetect = value procNetstat.TcpExt.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd": case "TCPHystartDelayCwnd":
procNetstat.TcpExt.TCPHystartDelayCwnd = value procNetstat.TcpExt.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv": case "TCPACKSkippedSynRecv":
procNetstat.TcpExt.TCPACKSkippedSynRecv = value procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS": case "TCPACKSkippedPAWS":
procNetstat.TcpExt.TCPACKSkippedPAWS = value procNetstat.TcpExt.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq": case "TCPACKSkippedSeq":
procNetstat.TcpExt.TCPACKSkippedSeq = value procNetstat.TcpExt.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2": case "TCPACKSkippedFinWait2":
procNetstat.TcpExt.TCPACKSkippedFinWait2 = value procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait": case "TCPACKSkippedTimeWait":
procNetstat.TcpExt.TCPACKSkippedTimeWait = value procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge": case "TCPACKSkippedChallenge":
procNetstat.TcpExt.TCPACKSkippedChallenge = value procNetstat.TcpExt.TCPACKSkippedChallenge = &value
case "TCPWinProbe": case "TCPWinProbe":
procNetstat.TcpExt.TCPWinProbe = value procNetstat.TcpExt.TCPWinProbe = &value
case "TCPKeepAlive": case "TCPKeepAlive":
procNetstat.TcpExt.TCPKeepAlive = value procNetstat.TcpExt.TCPKeepAlive = &value
case "TCPMTUPFail": case "TCPMTUPFail":
procNetstat.TcpExt.TCPMTUPFail = value procNetstat.TcpExt.TCPMTUPFail = &value
case "TCPMTUPSuccess": case "TCPMTUPSuccess":
procNetstat.TcpExt.TCPMTUPSuccess = value procNetstat.TcpExt.TCPMTUPSuccess = &value
case "TCPWqueueTooBig": case "TCPWqueueTooBig":
procNetstat.TcpExt.TCPWqueueTooBig = value procNetstat.TcpExt.TCPWqueueTooBig = &value
} }
case "IpExt": case "IpExt":
switch key { switch key {
case "InNoRoutes": case "InNoRoutes":
procNetstat.IpExt.InNoRoutes = value procNetstat.IpExt.InNoRoutes = &value
case "InTruncatedPkts": case "InTruncatedPkts":
procNetstat.IpExt.InTruncatedPkts = value procNetstat.IpExt.InTruncatedPkts = &value
case "InMcastPkts": case "InMcastPkts":
procNetstat.IpExt.InMcastPkts = value procNetstat.IpExt.InMcastPkts = &value
case "OutMcastPkts": case "OutMcastPkts":
procNetstat.IpExt.OutMcastPkts = value procNetstat.IpExt.OutMcastPkts = &value
case "InBcastPkts": case "InBcastPkts":
procNetstat.IpExt.InBcastPkts = value procNetstat.IpExt.InBcastPkts = &value
case "OutBcastPkts": case "OutBcastPkts":
procNetstat.IpExt.OutBcastPkts = value procNetstat.IpExt.OutBcastPkts = &value
case "InOctets": case "InOctets":
procNetstat.IpExt.InOctets = value procNetstat.IpExt.InOctets = &value
case "OutOctets": case "OutOctets":
procNetstat.IpExt.OutOctets = value procNetstat.IpExt.OutOctets = &value
case "InMcastOctets": case "InMcastOctets":
procNetstat.IpExt.InMcastOctets = value procNetstat.IpExt.InMcastOctets = &value
case "OutMcastOctets": case "OutMcastOctets":
procNetstat.IpExt.OutMcastOctets = value procNetstat.IpExt.OutMcastOctets = &value
case "InBcastOctets": case "InBcastOctets":
procNetstat.IpExt.InBcastOctets = value procNetstat.IpExt.InBcastOctets = &value
case "OutBcastOctets": case "OutBcastOctets":
procNetstat.IpExt.OutBcastOctets = value procNetstat.IpExt.OutBcastOctets = &value
case "InCsumErrors": case "InCsumErrors":
procNetstat.IpExt.InCsumErrors = value procNetstat.IpExt.InCsumErrors = &value
case "InNoECTPkts": case "InNoECTPkts":
procNetstat.IpExt.InNoECTPkts = value procNetstat.IpExt.InNoECTPkts = &value
case "InECT1Pkts": case "InECT1Pkts":
procNetstat.IpExt.InECT1Pkts = value procNetstat.IpExt.InECT1Pkts = &value
case "InECT0Pkts": case "InECT0Pkts":
procNetstat.IpExt.InECT0Pkts = value procNetstat.IpExt.InECT0Pkts = &value
case "InCEPkts": case "InCEPkts":
procNetstat.IpExt.InCEPkts = value procNetstat.IpExt.InCEPkts = &value
case "ReasmOverlaps": case "ReasmOverlaps":
procNetstat.IpExt.ReasmOverlaps = value procNetstat.IpExt.ReasmOverlaps = &value
} }
} }
} }
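Note on the hunk above: every ProcNetstat counter (TcpExt and IpExt alike) moves from float64 to *float64, so a counter the kernel does not expose is now a nil pointer instead of a silent zero. A minimal consumer sketch against the procfs API shown in this diff (variable names and the chosen counter are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	netstat, err := p.Netstat()
	if err != nil {
		panic(err)
	}
	// With pointer-valued fields, "not reported by this kernel" (nil) is
	// distinguishable from "reported as 0".
	if v := netstat.TcpExt.TCPTimeWaitOverflow; v != nil {
		fmt.Println("TCPTimeWaitOverflow:", *v)
	} else {
		fmt.Println("TCPTimeWaitOverflow not exposed by this kernel")
	}
}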


@ -37,100 +37,100 @@ type ProcSnmp struct {
} }
type Ip struct { // nolint:revive type Ip struct { // nolint:revive
Forwarding float64 Forwarding *float64
DefaultTTL float64 DefaultTTL *float64
InReceives float64 InReceives *float64
InHdrErrors float64 InHdrErrors *float64
InAddrErrors float64 InAddrErrors *float64
ForwDatagrams float64 ForwDatagrams *float64
InUnknownProtos float64 InUnknownProtos *float64
InDiscards float64 InDiscards *float64
InDelivers float64 InDelivers *float64
OutRequests float64 OutRequests *float64
OutDiscards float64 OutDiscards *float64
OutNoRoutes float64 OutNoRoutes *float64
ReasmTimeout float64 ReasmTimeout *float64
ReasmReqds float64 ReasmReqds *float64
ReasmOKs float64 ReasmOKs *float64
ReasmFails float64 ReasmFails *float64
FragOKs float64 FragOKs *float64
FragFails float64 FragFails *float64
FragCreates float64 FragCreates *float64
} }
type Icmp struct { type Icmp struct { // nolint:revive
InMsgs float64 InMsgs *float64
InErrors float64 InErrors *float64
InCsumErrors float64 InCsumErrors *float64
InDestUnreachs float64 InDestUnreachs *float64
InTimeExcds float64 InTimeExcds *float64
InParmProbs float64 InParmProbs *float64
InSrcQuenchs float64 InSrcQuenchs *float64
InRedirects float64 InRedirects *float64
InEchos float64 InEchos *float64
InEchoReps float64 InEchoReps *float64
InTimestamps float64 InTimestamps *float64
InTimestampReps float64 InTimestampReps *float64
InAddrMasks float64 InAddrMasks *float64
InAddrMaskReps float64 InAddrMaskReps *float64
OutMsgs float64 OutMsgs *float64
OutErrors float64 OutErrors *float64
OutDestUnreachs float64 OutDestUnreachs *float64
OutTimeExcds float64 OutTimeExcds *float64
OutParmProbs float64 OutParmProbs *float64
OutSrcQuenchs float64 OutSrcQuenchs *float64
OutRedirects float64 OutRedirects *float64
OutEchos float64 OutEchos *float64
OutEchoReps float64 OutEchoReps *float64
OutTimestamps float64 OutTimestamps *float64
OutTimestampReps float64 OutTimestampReps *float64
OutAddrMasks float64 OutAddrMasks *float64
OutAddrMaskReps float64 OutAddrMaskReps *float64
} }
type IcmpMsg struct { type IcmpMsg struct {
InType3 float64 InType3 *float64
OutType3 float64 OutType3 *float64
} }
type Tcp struct { // nolint:revive type Tcp struct { // nolint:revive
RtoAlgorithm float64 RtoAlgorithm *float64
RtoMin float64 RtoMin *float64
RtoMax float64 RtoMax *float64
MaxConn float64 MaxConn *float64
ActiveOpens float64 ActiveOpens *float64
PassiveOpens float64 PassiveOpens *float64
AttemptFails float64 AttemptFails *float64
EstabResets float64 EstabResets *float64
CurrEstab float64 CurrEstab *float64
InSegs float64 InSegs *float64
OutSegs float64 OutSegs *float64
RetransSegs float64 RetransSegs *float64
InErrs float64 InErrs *float64
OutRsts float64 OutRsts *float64
InCsumErrors float64 InCsumErrors *float64
} }
type Udp struct { // nolint:revive type Udp struct { // nolint:revive
InDatagrams float64 InDatagrams *float64
NoPorts float64 NoPorts *float64
InErrors float64 InErrors *float64
OutDatagrams float64 OutDatagrams *float64
RcvbufErrors float64 RcvbufErrors *float64
SndbufErrors float64 SndbufErrors *float64
InCsumErrors float64 InCsumErrors *float64
IgnoredMulti float64 IgnoredMulti *float64
} }
type UdpLite struct { // nolint:revive type UdpLite struct { // nolint:revive
InDatagrams float64 InDatagrams *float64
NoPorts float64 NoPorts *float64
InErrors float64 InErrors *float64
OutDatagrams float64 OutDatagrams *float64
RcvbufErrors float64 RcvbufErrors *float64
SndbufErrors float64 SndbufErrors *float64
InCsumErrors float64 InCsumErrors *float64
IgnoredMulti float64 IgnoredMulti *float64
} }
func (p Proc) Snmp() (ProcSnmp, error) { func (p Proc) Snmp() (ProcSnmp, error) {
@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip": case "Ip":
switch key { switch key {
case "Forwarding": case "Forwarding":
procSnmp.Ip.Forwarding = value procSnmp.Ip.Forwarding = &value
case "DefaultTTL": case "DefaultTTL":
procSnmp.Ip.DefaultTTL = value procSnmp.Ip.DefaultTTL = &value
case "InReceives": case "InReceives":
procSnmp.Ip.InReceives = value procSnmp.Ip.InReceives = &value
case "InHdrErrors": case "InHdrErrors":
procSnmp.Ip.InHdrErrors = value procSnmp.Ip.InHdrErrors = &value
case "InAddrErrors": case "InAddrErrors":
procSnmp.Ip.InAddrErrors = value procSnmp.Ip.InAddrErrors = &value
case "ForwDatagrams": case "ForwDatagrams":
procSnmp.Ip.ForwDatagrams = value procSnmp.Ip.ForwDatagrams = &value
case "InUnknownProtos": case "InUnknownProtos":
procSnmp.Ip.InUnknownProtos = value procSnmp.Ip.InUnknownProtos = &value
case "InDiscards": case "InDiscards":
procSnmp.Ip.InDiscards = value procSnmp.Ip.InDiscards = &value
case "InDelivers": case "InDelivers":
procSnmp.Ip.InDelivers = value procSnmp.Ip.InDelivers = &value
case "OutRequests": case "OutRequests":
procSnmp.Ip.OutRequests = value procSnmp.Ip.OutRequests = &value
case "OutDiscards": case "OutDiscards":
procSnmp.Ip.OutDiscards = value procSnmp.Ip.OutDiscards = &value
case "OutNoRoutes": case "OutNoRoutes":
procSnmp.Ip.OutNoRoutes = value procSnmp.Ip.OutNoRoutes = &value
case "ReasmTimeout": case "ReasmTimeout":
procSnmp.Ip.ReasmTimeout = value procSnmp.Ip.ReasmTimeout = &value
case "ReasmReqds": case "ReasmReqds":
procSnmp.Ip.ReasmReqds = value procSnmp.Ip.ReasmReqds = &value
case "ReasmOKs": case "ReasmOKs":
procSnmp.Ip.ReasmOKs = value procSnmp.Ip.ReasmOKs = &value
case "ReasmFails": case "ReasmFails":
procSnmp.Ip.ReasmFails = value procSnmp.Ip.ReasmFails = &value
case "FragOKs": case "FragOKs":
procSnmp.Ip.FragOKs = value procSnmp.Ip.FragOKs = &value
case "FragFails": case "FragFails":
procSnmp.Ip.FragFails = value procSnmp.Ip.FragFails = &value
case "FragCreates": case "FragCreates":
procSnmp.Ip.FragCreates = value procSnmp.Ip.FragCreates = &value
} }
case "Icmp": case "Icmp":
switch key { switch key {
case "InMsgs": case "InMsgs":
procSnmp.Icmp.InMsgs = value procSnmp.Icmp.InMsgs = &value
case "InErrors": case "InErrors":
procSnmp.Icmp.InErrors = value procSnmp.Icmp.InErrors = &value
case "InCsumErrors": case "InCsumErrors":
procSnmp.Icmp.InCsumErrors = value procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs": case "InDestUnreachs":
procSnmp.Icmp.InDestUnreachs = value procSnmp.Icmp.InDestUnreachs = &value
case "InTimeExcds": case "InTimeExcds":
procSnmp.Icmp.InTimeExcds = value procSnmp.Icmp.InTimeExcds = &value
case "InParmProbs": case "InParmProbs":
procSnmp.Icmp.InParmProbs = value procSnmp.Icmp.InParmProbs = &value
case "InSrcQuenchs": case "InSrcQuenchs":
procSnmp.Icmp.InSrcQuenchs = value procSnmp.Icmp.InSrcQuenchs = &value
case "InRedirects": case "InRedirects":
procSnmp.Icmp.InRedirects = value procSnmp.Icmp.InRedirects = &value
case "InEchos": case "InEchos":
procSnmp.Icmp.InEchos = value procSnmp.Icmp.InEchos = &value
case "InEchoReps": case "InEchoReps":
procSnmp.Icmp.InEchoReps = value procSnmp.Icmp.InEchoReps = &value
case "InTimestamps": case "InTimestamps":
procSnmp.Icmp.InTimestamps = value procSnmp.Icmp.InTimestamps = &value
case "InTimestampReps": case "InTimestampReps":
procSnmp.Icmp.InTimestampReps = value procSnmp.Icmp.InTimestampReps = &value
case "InAddrMasks": case "InAddrMasks":
procSnmp.Icmp.InAddrMasks = value procSnmp.Icmp.InAddrMasks = &value
case "InAddrMaskReps": case "InAddrMaskReps":
procSnmp.Icmp.InAddrMaskReps = value procSnmp.Icmp.InAddrMaskReps = &value
case "OutMsgs": case "OutMsgs":
procSnmp.Icmp.OutMsgs = value procSnmp.Icmp.OutMsgs = &value
case "OutErrors": case "OutErrors":
procSnmp.Icmp.OutErrors = value procSnmp.Icmp.OutErrors = &value
case "OutDestUnreachs": case "OutDestUnreachs":
procSnmp.Icmp.OutDestUnreachs = value procSnmp.Icmp.OutDestUnreachs = &value
case "OutTimeExcds": case "OutTimeExcds":
procSnmp.Icmp.OutTimeExcds = value procSnmp.Icmp.OutTimeExcds = &value
case "OutParmProbs": case "OutParmProbs":
procSnmp.Icmp.OutParmProbs = value procSnmp.Icmp.OutParmProbs = &value
case "OutSrcQuenchs": case "OutSrcQuenchs":
procSnmp.Icmp.OutSrcQuenchs = value procSnmp.Icmp.OutSrcQuenchs = &value
case "OutRedirects": case "OutRedirects":
procSnmp.Icmp.OutRedirects = value procSnmp.Icmp.OutRedirects = &value
case "OutEchos": case "OutEchos":
procSnmp.Icmp.OutEchos = value procSnmp.Icmp.OutEchos = &value
case "OutEchoReps": case "OutEchoReps":
procSnmp.Icmp.OutEchoReps = value procSnmp.Icmp.OutEchoReps = &value
case "OutTimestamps": case "OutTimestamps":
procSnmp.Icmp.OutTimestamps = value procSnmp.Icmp.OutTimestamps = &value
case "OutTimestampReps": case "OutTimestampReps":
procSnmp.Icmp.OutTimestampReps = value procSnmp.Icmp.OutTimestampReps = &value
case "OutAddrMasks": case "OutAddrMasks":
procSnmp.Icmp.OutAddrMasks = value procSnmp.Icmp.OutAddrMasks = &value
case "OutAddrMaskReps": case "OutAddrMaskReps":
procSnmp.Icmp.OutAddrMaskReps = value procSnmp.Icmp.OutAddrMaskReps = &value
} }
case "IcmpMsg": case "IcmpMsg":
switch key { switch key {
case "InType3": case "InType3":
procSnmp.IcmpMsg.InType3 = value procSnmp.IcmpMsg.InType3 = &value
case "OutType3": case "OutType3":
procSnmp.IcmpMsg.OutType3 = value procSnmp.IcmpMsg.OutType3 = &value
} }
case "Tcp": case "Tcp":
switch key { switch key {
case "RtoAlgorithm": case "RtoAlgorithm":
procSnmp.Tcp.RtoAlgorithm = value procSnmp.Tcp.RtoAlgorithm = &value
case "RtoMin": case "RtoMin":
procSnmp.Tcp.RtoMin = value procSnmp.Tcp.RtoMin = &value
case "RtoMax": case "RtoMax":
procSnmp.Tcp.RtoMax = value procSnmp.Tcp.RtoMax = &value
case "MaxConn": case "MaxConn":
procSnmp.Tcp.MaxConn = value procSnmp.Tcp.MaxConn = &value
case "ActiveOpens": case "ActiveOpens":
procSnmp.Tcp.ActiveOpens = value procSnmp.Tcp.ActiveOpens = &value
case "PassiveOpens": case "PassiveOpens":
procSnmp.Tcp.PassiveOpens = value procSnmp.Tcp.PassiveOpens = &value
case "AttemptFails": case "AttemptFails":
procSnmp.Tcp.AttemptFails = value procSnmp.Tcp.AttemptFails = &value
case "EstabResets": case "EstabResets":
procSnmp.Tcp.EstabResets = value procSnmp.Tcp.EstabResets = &value
case "CurrEstab": case "CurrEstab":
procSnmp.Tcp.CurrEstab = value procSnmp.Tcp.CurrEstab = &value
case "InSegs": case "InSegs":
procSnmp.Tcp.InSegs = value procSnmp.Tcp.InSegs = &value
case "OutSegs": case "OutSegs":
procSnmp.Tcp.OutSegs = value procSnmp.Tcp.OutSegs = &value
case "RetransSegs": case "RetransSegs":
procSnmp.Tcp.RetransSegs = value procSnmp.Tcp.RetransSegs = &value
case "InErrs": case "InErrs":
procSnmp.Tcp.InErrs = value procSnmp.Tcp.InErrs = &value
case "OutRsts": case "OutRsts":
procSnmp.Tcp.OutRsts = value procSnmp.Tcp.OutRsts = &value
case "InCsumErrors": case "InCsumErrors":
procSnmp.Tcp.InCsumErrors = value procSnmp.Tcp.InCsumErrors = &value
} }
case "Udp": case "Udp":
switch key { switch key {
case "InDatagrams": case "InDatagrams":
procSnmp.Udp.InDatagrams = value procSnmp.Udp.InDatagrams = &value
case "NoPorts": case "NoPorts":
procSnmp.Udp.NoPorts = value procSnmp.Udp.NoPorts = &value
case "InErrors": case "InErrors":
procSnmp.Udp.InErrors = value procSnmp.Udp.InErrors = &value
case "OutDatagrams": case "OutDatagrams":
procSnmp.Udp.OutDatagrams = value procSnmp.Udp.OutDatagrams = &value
case "RcvbufErrors": case "RcvbufErrors":
procSnmp.Udp.RcvbufErrors = value procSnmp.Udp.RcvbufErrors = &value
case "SndbufErrors": case "SndbufErrors":
procSnmp.Udp.SndbufErrors = value procSnmp.Udp.SndbufErrors = &value
case "InCsumErrors": case "InCsumErrors":
procSnmp.Udp.InCsumErrors = value procSnmp.Udp.InCsumErrors = &value
case "IgnoredMulti": case "IgnoredMulti":
procSnmp.Udp.IgnoredMulti = value procSnmp.Udp.IgnoredMulti = &value
} }
case "UdpLite": case "UdpLite":
switch key { switch key {
case "InDatagrams": case "InDatagrams":
procSnmp.UdpLite.InDatagrams = value procSnmp.UdpLite.InDatagrams = &value
case "NoPorts": case "NoPorts":
procSnmp.UdpLite.NoPorts = value procSnmp.UdpLite.NoPorts = &value
case "InErrors": case "InErrors":
procSnmp.UdpLite.InErrors = value procSnmp.UdpLite.InErrors = &value
case "OutDatagrams": case "OutDatagrams":
procSnmp.UdpLite.OutDatagrams = value procSnmp.UdpLite.OutDatagrams = &value
case "RcvbufErrors": case "RcvbufErrors":
procSnmp.UdpLite.RcvbufErrors = value procSnmp.UdpLite.RcvbufErrors = &value
case "SndbufErrors": case "SndbufErrors":
procSnmp.UdpLite.SndbufErrors = value procSnmp.UdpLite.SndbufErrors = &value
case "InCsumErrors": case "InCsumErrors":
procSnmp.UdpLite.InCsumErrors = value procSnmp.UdpLite.InCsumErrors = &value
case "IgnoredMulti": case "IgnoredMulti":
procSnmp.UdpLite.IgnoredMulti = value procSnmp.UdpLite.IgnoredMulti = &value
} }
} }
} }


@ -36,106 +36,106 @@ type ProcSnmp6 struct {
} }
type Ip6 struct { // nolint:revive type Ip6 struct { // nolint:revive
InReceives float64 InReceives *float64
InHdrErrors float64 InHdrErrors *float64
InTooBigErrors float64 InTooBigErrors *float64
InNoRoutes float64 InNoRoutes *float64
InAddrErrors float64 InAddrErrors *float64
InUnknownProtos float64 InUnknownProtos *float64
InTruncatedPkts float64 InTruncatedPkts *float64
InDiscards float64 InDiscards *float64
InDelivers float64 InDelivers *float64
OutForwDatagrams float64 OutForwDatagrams *float64
OutRequests float64 OutRequests *float64
OutDiscards float64 OutDiscards *float64
OutNoRoutes float64 OutNoRoutes *float64
ReasmTimeout float64 ReasmTimeout *float64
ReasmReqds float64 ReasmReqds *float64
ReasmOKs float64 ReasmOKs *float64
ReasmFails float64 ReasmFails *float64
FragOKs float64 FragOKs *float64
FragFails float64 FragFails *float64
FragCreates float64 FragCreates *float64
InMcastPkts float64 InMcastPkts *float64
OutMcastPkts float64 OutMcastPkts *float64
InOctets float64 InOctets *float64
OutOctets float64 OutOctets *float64
InMcastOctets float64 InMcastOctets *float64
OutMcastOctets float64 OutMcastOctets *float64
InBcastOctets float64 InBcastOctets *float64
OutBcastOctets float64 OutBcastOctets *float64
InNoECTPkts float64 InNoECTPkts *float64
InECT1Pkts float64 InECT1Pkts *float64
InECT0Pkts float64 InECT0Pkts *float64
InCEPkts float64 InCEPkts *float64
} }
type Icmp6 struct { type Icmp6 struct {
InMsgs float64 InMsgs *float64
InErrors float64 InErrors *float64
OutMsgs float64 OutMsgs *float64
OutErrors float64 OutErrors *float64
InCsumErrors float64 InCsumErrors *float64
InDestUnreachs float64 InDestUnreachs *float64
InPktTooBigs float64 InPktTooBigs *float64
InTimeExcds float64 InTimeExcds *float64
InParmProblems float64 InParmProblems *float64
InEchos float64 InEchos *float64
InEchoReplies float64 InEchoReplies *float64
InGroupMembQueries float64 InGroupMembQueries *float64
InGroupMembResponses float64 InGroupMembResponses *float64
InGroupMembReductions float64 InGroupMembReductions *float64
InRouterSolicits float64 InRouterSolicits *float64
InRouterAdvertisements float64 InRouterAdvertisements *float64
InNeighborSolicits float64 InNeighborSolicits *float64
InNeighborAdvertisements float64 InNeighborAdvertisements *float64
InRedirects float64 InRedirects *float64
InMLDv2Reports float64 InMLDv2Reports *float64
OutDestUnreachs float64 OutDestUnreachs *float64
OutPktTooBigs float64 OutPktTooBigs *float64
OutTimeExcds float64 OutTimeExcds *float64
OutParmProblems float64 OutParmProblems *float64
OutEchos float64 OutEchos *float64
OutEchoReplies float64 OutEchoReplies *float64
OutGroupMembQueries float64 OutGroupMembQueries *float64
OutGroupMembResponses float64 OutGroupMembResponses *float64
OutGroupMembReductions float64 OutGroupMembReductions *float64
OutRouterSolicits float64 OutRouterSolicits *float64
OutRouterAdvertisements float64 OutRouterAdvertisements *float64
OutNeighborSolicits float64 OutNeighborSolicits *float64
OutNeighborAdvertisements float64 OutNeighborAdvertisements *float64
OutRedirects float64 OutRedirects *float64
OutMLDv2Reports float64 OutMLDv2Reports *float64
InType1 float64 InType1 *float64
InType134 float64 InType134 *float64
InType135 float64 InType135 *float64
InType136 float64 InType136 *float64
InType143 float64 InType143 *float64
OutType133 float64 OutType133 *float64
OutType135 float64 OutType135 *float64
OutType136 float64 OutType136 *float64
OutType143 float64 OutType143 *float64
} }
type Udp6 struct { // nolint:revive type Udp6 struct { // nolint:revive
InDatagrams float64 InDatagrams *float64
NoPorts float64 NoPorts *float64
InErrors float64 InErrors *float64
OutDatagrams float64 OutDatagrams *float64
RcvbufErrors float64 RcvbufErrors *float64
SndbufErrors float64 SndbufErrors *float64
InCsumErrors float64 InCsumErrors *float64
IgnoredMulti float64 IgnoredMulti *float64
} }
type UdpLite6 struct { // nolint:revive type UdpLite6 struct { // nolint:revive
InDatagrams float64 InDatagrams *float64
NoPorts float64 NoPorts *float64
InErrors float64 InErrors *float64
OutDatagrams float64 OutDatagrams *float64
RcvbufErrors float64 RcvbufErrors *float64
SndbufErrors float64 SndbufErrors *float64
InCsumErrors float64 InCsumErrors *float64
} }
func (p Proc) Snmp6() (ProcSnmp6, error) { func (p Proc) Snmp6() (ProcSnmp6, error) {
@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6": case "Ip6":
switch key { switch key {
case "InReceives": case "InReceives":
procSnmp6.Ip6.InReceives = value procSnmp6.Ip6.InReceives = &value
case "InHdrErrors": case "InHdrErrors":
procSnmp6.Ip6.InHdrErrors = value procSnmp6.Ip6.InHdrErrors = &value
case "InTooBigErrors": case "InTooBigErrors":
procSnmp6.Ip6.InTooBigErrors = value procSnmp6.Ip6.InTooBigErrors = &value
case "InNoRoutes": case "InNoRoutes":
procSnmp6.Ip6.InNoRoutes = value procSnmp6.Ip6.InNoRoutes = &value
case "InAddrErrors": case "InAddrErrors":
procSnmp6.Ip6.InAddrErrors = value procSnmp6.Ip6.InAddrErrors = &value
case "InUnknownProtos": case "InUnknownProtos":
procSnmp6.Ip6.InUnknownProtos = value procSnmp6.Ip6.InUnknownProtos = &value
case "InTruncatedPkts": case "InTruncatedPkts":
procSnmp6.Ip6.InTruncatedPkts = value procSnmp6.Ip6.InTruncatedPkts = &value
case "InDiscards": case "InDiscards":
procSnmp6.Ip6.InDiscards = value procSnmp6.Ip6.InDiscards = &value
case "InDelivers": case "InDelivers":
procSnmp6.Ip6.InDelivers = value procSnmp6.Ip6.InDelivers = &value
case "OutForwDatagrams": case "OutForwDatagrams":
procSnmp6.Ip6.OutForwDatagrams = value procSnmp6.Ip6.OutForwDatagrams = &value
case "OutRequests": case "OutRequests":
procSnmp6.Ip6.OutRequests = value procSnmp6.Ip6.OutRequests = &value
case "OutDiscards": case "OutDiscards":
procSnmp6.Ip6.OutDiscards = value procSnmp6.Ip6.OutDiscards = &value
case "OutNoRoutes": case "OutNoRoutes":
procSnmp6.Ip6.OutNoRoutes = value procSnmp6.Ip6.OutNoRoutes = &value
case "ReasmTimeout": case "ReasmTimeout":
procSnmp6.Ip6.ReasmTimeout = value procSnmp6.Ip6.ReasmTimeout = &value
case "ReasmReqds": case "ReasmReqds":
procSnmp6.Ip6.ReasmReqds = value procSnmp6.Ip6.ReasmReqds = &value
case "ReasmOKs": case "ReasmOKs":
procSnmp6.Ip6.ReasmOKs = value procSnmp6.Ip6.ReasmOKs = &value
case "ReasmFails": case "ReasmFails":
procSnmp6.Ip6.ReasmFails = value procSnmp6.Ip6.ReasmFails = &value
case "FragOKs": case "FragOKs":
procSnmp6.Ip6.FragOKs = value procSnmp6.Ip6.FragOKs = &value
case "FragFails": case "FragFails":
procSnmp6.Ip6.FragFails = value procSnmp6.Ip6.FragFails = &value
case "FragCreates": case "FragCreates":
procSnmp6.Ip6.FragCreates = value procSnmp6.Ip6.FragCreates = &value
case "InMcastPkts": case "InMcastPkts":
procSnmp6.Ip6.InMcastPkts = value procSnmp6.Ip6.InMcastPkts = &value
case "OutMcastPkts": case "OutMcastPkts":
procSnmp6.Ip6.OutMcastPkts = value procSnmp6.Ip6.OutMcastPkts = &value
case "InOctets": case "InOctets":
procSnmp6.Ip6.InOctets = value procSnmp6.Ip6.InOctets = &value
case "OutOctets": case "OutOctets":
procSnmp6.Ip6.OutOctets = value procSnmp6.Ip6.OutOctets = &value
case "InMcastOctets": case "InMcastOctets":
procSnmp6.Ip6.InMcastOctets = value procSnmp6.Ip6.InMcastOctets = &value
case "OutMcastOctets": case "OutMcastOctets":
procSnmp6.Ip6.OutMcastOctets = value procSnmp6.Ip6.OutMcastOctets = &value
case "InBcastOctets": case "InBcastOctets":
procSnmp6.Ip6.InBcastOctets = value procSnmp6.Ip6.InBcastOctets = &value
case "OutBcastOctets": case "OutBcastOctets":
procSnmp6.Ip6.OutBcastOctets = value procSnmp6.Ip6.OutBcastOctets = &value
case "InNoECTPkts": case "InNoECTPkts":
procSnmp6.Ip6.InNoECTPkts = value procSnmp6.Ip6.InNoECTPkts = &value
case "InECT1Pkts": case "InECT1Pkts":
procSnmp6.Ip6.InECT1Pkts = value procSnmp6.Ip6.InECT1Pkts = &value
case "InECT0Pkts": case "InECT0Pkts":
procSnmp6.Ip6.InECT0Pkts = value procSnmp6.Ip6.InECT0Pkts = &value
case "InCEPkts": case "InCEPkts":
procSnmp6.Ip6.InCEPkts = value procSnmp6.Ip6.InCEPkts = &value
} }
case "Icmp6": case "Icmp6":
switch key { switch key {
case "InMsgs": case "InMsgs":
procSnmp6.Icmp6.InMsgs = value procSnmp6.Icmp6.InMsgs = &value
case "InErrors": case "InErrors":
procSnmp6.Icmp6.InErrors = value procSnmp6.Icmp6.InErrors = &value
case "OutMsgs": case "OutMsgs":
procSnmp6.Icmp6.OutMsgs = value procSnmp6.Icmp6.OutMsgs = &value
case "OutErrors": case "OutErrors":
procSnmp6.Icmp6.OutErrors = value procSnmp6.Icmp6.OutErrors = &value
case "InCsumErrors": case "InCsumErrors":
procSnmp6.Icmp6.InCsumErrors = value procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs": case "InDestUnreachs":
procSnmp6.Icmp6.InDestUnreachs = value procSnmp6.Icmp6.InDestUnreachs = &value
case "InPktTooBigs": case "InPktTooBigs":
procSnmp6.Icmp6.InPktTooBigs = value procSnmp6.Icmp6.InPktTooBigs = &value
case "InTimeExcds": case "InTimeExcds":
procSnmp6.Icmp6.InTimeExcds = value procSnmp6.Icmp6.InTimeExcds = &value
case "InParmProblems": case "InParmProblems":
procSnmp6.Icmp6.InParmProblems = value procSnmp6.Icmp6.InParmProblems = &value
case "InEchos": case "InEchos":
procSnmp6.Icmp6.InEchos = value procSnmp6.Icmp6.InEchos = &value
case "InEchoReplies": case "InEchoReplies":
procSnmp6.Icmp6.InEchoReplies = value procSnmp6.Icmp6.InEchoReplies = &value
case "InGroupMembQueries": case "InGroupMembQueries":
procSnmp6.Icmp6.InGroupMembQueries = value procSnmp6.Icmp6.InGroupMembQueries = &value
case "InGroupMembResponses": case "InGroupMembResponses":
procSnmp6.Icmp6.InGroupMembResponses = value procSnmp6.Icmp6.InGroupMembResponses = &value
case "InGroupMembReductions": case "InGroupMembReductions":
procSnmp6.Icmp6.InGroupMembReductions = value procSnmp6.Icmp6.InGroupMembReductions = &value
case "InRouterSolicits": case "InRouterSolicits":
procSnmp6.Icmp6.InRouterSolicits = value procSnmp6.Icmp6.InRouterSolicits = &value
case "InRouterAdvertisements": case "InRouterAdvertisements":
procSnmp6.Icmp6.InRouterAdvertisements = value procSnmp6.Icmp6.InRouterAdvertisements = &value
case "InNeighborSolicits": case "InNeighborSolicits":
procSnmp6.Icmp6.InNeighborSolicits = value procSnmp6.Icmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements": case "InNeighborAdvertisements":
procSnmp6.Icmp6.InNeighborAdvertisements = value procSnmp6.Icmp6.InNeighborAdvertisements = &value
case "InRedirects": case "InRedirects":
procSnmp6.Icmp6.InRedirects = value procSnmp6.Icmp6.InRedirects = &value
case "InMLDv2Reports": case "InMLDv2Reports":
procSnmp6.Icmp6.InMLDv2Reports = value procSnmp6.Icmp6.InMLDv2Reports = &value
case "OutDestUnreachs": case "OutDestUnreachs":
procSnmp6.Icmp6.OutDestUnreachs = value procSnmp6.Icmp6.OutDestUnreachs = &value
case "OutPktTooBigs": case "OutPktTooBigs":
procSnmp6.Icmp6.OutPktTooBigs = value procSnmp6.Icmp6.OutPktTooBigs = &value
case "OutTimeExcds": case "OutTimeExcds":
procSnmp6.Icmp6.OutTimeExcds = value procSnmp6.Icmp6.OutTimeExcds = &value
case "OutParmProblems": case "OutParmProblems":
procSnmp6.Icmp6.OutParmProblems = value procSnmp6.Icmp6.OutParmProblems = &value
case "OutEchos": case "OutEchos":
procSnmp6.Icmp6.OutEchos = value procSnmp6.Icmp6.OutEchos = &value
case "OutEchoReplies": case "OutEchoReplies":
procSnmp6.Icmp6.OutEchoReplies = value procSnmp6.Icmp6.OutEchoReplies = &value
case "OutGroupMembQueries": case "OutGroupMembQueries":
procSnmp6.Icmp6.OutGroupMembQueries = value procSnmp6.Icmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses": case "OutGroupMembResponses":
procSnmp6.Icmp6.OutGroupMembResponses = value procSnmp6.Icmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions": case "OutGroupMembReductions":
procSnmp6.Icmp6.OutGroupMembReductions = value procSnmp6.Icmp6.OutGroupMembReductions = &value
case "OutRouterSolicits": case "OutRouterSolicits":
procSnmp6.Icmp6.OutRouterSolicits = value procSnmp6.Icmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements": case "OutRouterAdvertisements":
procSnmp6.Icmp6.OutRouterAdvertisements = value procSnmp6.Icmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits": case "OutNeighborSolicits":
procSnmp6.Icmp6.OutNeighborSolicits = value procSnmp6.Icmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements": case "OutNeighborAdvertisements":
procSnmp6.Icmp6.OutNeighborAdvertisements = value procSnmp6.Icmp6.OutNeighborAdvertisements = &value
case "OutRedirects": case "OutRedirects":
procSnmp6.Icmp6.OutRedirects = value procSnmp6.Icmp6.OutRedirects = &value
case "OutMLDv2Reports": case "OutMLDv2Reports":
procSnmp6.Icmp6.OutMLDv2Reports = value procSnmp6.Icmp6.OutMLDv2Reports = &value
case "InType1": case "InType1":
procSnmp6.Icmp6.InType1 = value procSnmp6.Icmp6.InType1 = &value
case "InType134": case "InType134":
procSnmp6.Icmp6.InType134 = value procSnmp6.Icmp6.InType134 = &value
case "InType135": case "InType135":
procSnmp6.Icmp6.InType135 = value procSnmp6.Icmp6.InType135 = &value
case "InType136": case "InType136":
procSnmp6.Icmp6.InType136 = value procSnmp6.Icmp6.InType136 = &value
case "InType143": case "InType143":
procSnmp6.Icmp6.InType143 = value procSnmp6.Icmp6.InType143 = &value
case "OutType133": case "OutType133":
procSnmp6.Icmp6.OutType133 = value procSnmp6.Icmp6.OutType133 = &value
case "OutType135": case "OutType135":
procSnmp6.Icmp6.OutType135 = value procSnmp6.Icmp6.OutType135 = &value
case "OutType136": case "OutType136":
procSnmp6.Icmp6.OutType136 = value procSnmp6.Icmp6.OutType136 = &value
case "OutType143": case "OutType143":
procSnmp6.Icmp6.OutType143 = value procSnmp6.Icmp6.OutType143 = &value
} }
case "Udp6": case "Udp6":
switch key { switch key {
case "InDatagrams": case "InDatagrams":
procSnmp6.Udp6.InDatagrams = value procSnmp6.Udp6.InDatagrams = &value
case "NoPorts": case "NoPorts":
procSnmp6.Udp6.NoPorts = value procSnmp6.Udp6.NoPorts = &value
case "InErrors": case "InErrors":
procSnmp6.Udp6.InErrors = value procSnmp6.Udp6.InErrors = &value
case "OutDatagrams": case "OutDatagrams":
procSnmp6.Udp6.OutDatagrams = value procSnmp6.Udp6.OutDatagrams = &value
case "RcvbufErrors": case "RcvbufErrors":
procSnmp6.Udp6.RcvbufErrors = value procSnmp6.Udp6.RcvbufErrors = &value
case "SndbufErrors": case "SndbufErrors":
procSnmp6.Udp6.SndbufErrors = value procSnmp6.Udp6.SndbufErrors = &value
case "InCsumErrors": case "InCsumErrors":
procSnmp6.Udp6.InCsumErrors = value procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti": case "IgnoredMulti":
procSnmp6.Udp6.IgnoredMulti = value procSnmp6.Udp6.IgnoredMulti = &value
} }
case "UdpLite6": case "UdpLite6":
switch key { switch key {
case "InDatagrams": case "InDatagrams":
procSnmp6.UdpLite6.InDatagrams = value procSnmp6.UdpLite6.InDatagrams = &value
case "NoPorts": case "NoPorts":
procSnmp6.UdpLite6.NoPorts = value procSnmp6.UdpLite6.NoPorts = &value
case "InErrors": case "InErrors":
procSnmp6.UdpLite6.InErrors = value procSnmp6.UdpLite6.InErrors = &value
case "OutDatagrams": case "OutDatagrams":
procSnmp6.UdpLite6.OutDatagrams = value procSnmp6.UdpLite6.OutDatagrams = &value
case "RcvbufErrors": case "RcvbufErrors":
procSnmp6.UdpLite6.RcvbufErrors = value procSnmp6.UdpLite6.RcvbufErrors = &value
case "SndbufErrors": case "SndbufErrors":
procSnmp6.UdpLite6.SndbufErrors = value procSnmp6.UdpLite6.SndbufErrors = &value
case "InCsumErrors": case "InCsumErrors":
procSnmp6.UdpLite6.InCsumErrors = value procSnmp6.UdpLite6.InCsumErrors = &value
} }
} }
} }


@@ -102,6 +102,8 @@ type ProcStat struct {
 RSS int
 // Soft limit in bytes on the rss of the process.
 RSSLimit uint64
+// CPU number last executed on.
+Processor uint
 // Real-time scheduling priority, a number in the range 1 to 99 for processes
 // scheduled under a real-time policy, or 0, for non-real-time processes.
 RTPriority uint
@@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) {
 &ignoreUint64,
 &ignoreUint64,
 &ignoreInt64,
-&ignoreInt64,
+&s.Processor,
 &s.RTPriority,
 &s.Policy,
 &s.DelayAcctBlkIOTicks,
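A quick illustration of the new Processor field wired up above (a sketch, not part of the vendored change; only the procfs calls shown in this diff are assumed):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	st, err := p.Stat()
	if err != nil {
		panic(err)
	}
	// Processor reports the CPU the task last executed on, straight from
	// /proc/[pid]/stat.
	fmt.Printf("pid %d last ran on CPU %d\n", st.PID, st.Processor)
}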


@@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) {
 kv := strings.SplitN(line, ":", 2)
 // removes spaces
-k := string(strings.TrimSpace(kv[0]))
-v := string(strings.TrimSpace(kv[1]))
+k := strings.TrimSpace(kv[0])
+v := strings.TrimSpace(kv[1])
 // removes "kB"
-v = string(bytes.Trim([]byte(v), " kB"))
+v = strings.TrimSuffix(v, " kB")
 // value to int when possible
 // we can skip error check here, 'cause vKBytes is not used when value is a string
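The TrimSuffix swap above is not purely cosmetic: bytes.Trim treats " kB" as a set of bytes to strip from both ends, while strings.TrimSuffix removes only the literal trailing unit. A contrived comparison (illustrative values, standard library only):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	// For the common case both produce the same result:
	fmt.Println(string(bytes.Trim([]byte("1024 kB"), " kB"))) // "1024"
	fmt.Println(strings.TrimSuffix("1024 kB", " kB"))         // "1024"

	// But Trim also eats leading/trailing runs of ' ', 'k' and 'B', which is
	// wrong for values that merely contain those bytes:
	fmt.Println(string(bytes.Trim([]byte("Bk 42 kB"), " kB"))) // "42"
	fmt.Println(strings.TrimSuffix("Bk 42 kB", " kB"))         // "Bk 42"
}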


@@ -62,7 +62,7 @@ type Stat struct {
 // Summed up cpu statistics.
 CPUTotal CPUStat
 // Per-CPU statistics.
-CPU []CPUStat
+CPU map[int64]CPUStat
 // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
 IRQTotal uint64
 // Number of times a numbered IRQ was triggered.
@@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) {
 if err != nil {
 return Stat{}, err
 }
+procStat, err := parseStat(bytes.NewReader(data), fileName)
+if err != nil {
+return Stat{}, err
+}
+return procStat, nil
+}
-stat := Stat{}
+// parseStat parses the metrics from /proc/[pid]/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+var (
+scanner = bufio.NewScanner(r)
+stat = Stat{
+CPU: make(map[int64]CPUStat),
+}
+err error
+)
-scanner := bufio.NewScanner(bytes.NewReader(data))
 for scanner.Scan() {
 line := scanner.Text()
 parts := strings.Fields(scanner.Text())
@@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) {
 if cpuID == -1 {
 stat.CPUTotal = cpuStat
 } else {
-for int64(len(stat.CPU)) <= cpuID {
-stat.CPU = append(stat.CPU, CPUStat{})
-}
 stat.CPU[cpuID] = cpuStat
 }
 }
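Since Stat.CPU is now map[int64]CPUStat keyed by CPU id, holes in CPU numbering no longer produce zero-filled padding entries and callers iterate the map instead of indexing a slice. A consumer sketch (only the procfs API visible in this diff is assumed):

package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	st, err := fs.Stat()
	if err != nil {
		panic(err)
	}
	// Offline or hot-removed CPUs simply have no map entry.
	ids := make([]int64, 0, len(st.CPU))
	for id := range st.CPU {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	for _, id := range ids {
		fmt.Printf("cpu%d: user=%.2f system=%.2f\n", id, st.CPU[id].User, st.CPU[id].System)
	}
}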

vendor/github.com/prometheus/procfs/thread.go generated vendored Normal file

@ -0,0 +1,79 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"os"
"strconv"
fsi "github.com/prometheus/procfs/internal/fs"
)
// Provide access to /proc/PID/task/TID files, for thread specific values. Since
// such files have the same structure as /proc/PID/ ones, the data structures
// and the parsers for the latter may be reused.
// AllThreads returns a list of all currently available threads under /proc/PID.
func AllThreads(pid int) (Procs, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return Procs{}, err
}
return fs.AllThreads(pid)
}
// AllThreads returns a list of all currently available threads for PID.
func (fs FS) AllThreads(pid int) (Procs, error) {
taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
d, err := os.Open(taskPath)
if err != nil {
return Procs{}, err
}
defer d.Close()
names, err := d.Readdirnames(-1)
if err != nil {
return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
}
t := Procs{}
for _, n := range names {
tid, err := strconv.ParseInt(n, 10, 64)
if err != nil {
continue
}
t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
}
return t, nil
}
// Thread returns a process for a given PID, TID.
func (fs FS) Thread(pid, tid int) (Proc, error) {
taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
if _, err := os.Stat(taskPath); err != nil {
return Proc{}, err
}
return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil
}
// Thread returns a process for a given TID of Proc.
func (proc Proc) Thread(tid int) (Proc, error) {
tfs := fsi.FS(proc.path("task"))
if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil {
return Proc{}, err
}
return Proc{PID: tid, fs: tfs}, nil
}
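A hypothetical caller for the new thread helpers above; everything beyond the procfs calls themselves is illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	// Threads of the current process, i.e. the entries under /proc/self/task.
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		panic(err)
	}
	for _, t := range threads {
		// Each returned Proc is rooted at /proc/PID/task/TID, so the existing
		// per-process parsers (Stat, Status, ...) can be reused unchanged.
		st, err := t.Stat()
		if err != nil {
			continue
		}
		fmt.Printf("tid=%d comm=%s state=%s\n", t.PID, st.Comm, st.State)
	}
}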


@@ -26,7 +26,9 @@ import (
 )
 // The VM interface is described at
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
 // Each setting is exposed as a single file.
 // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
 // and numa_zonelist_order (deprecated) which is a string.


@@ -1,9 +1,10 @@ package persistence
 package persistence
 import (
-"crypto/sha256"
 "encoding/binary"
 "errors"
+"github.com/waku-org/go-waku/waku/v2/utils"
 )
 const (
@@ -34,7 +35,7 @@ func (k *DBKey) Bytes() []byte {
 // NewDBKey creates a new DBKey with the given values.
 func NewDBKey(senderTimestamp uint64, receiverTimestamp uint64, pubsubTopic string, digest []byte) *DBKey {
-pubSubHash := sha256.Sum256([]byte(pubsubTopic))
+pubSubHash := utils.SHA256([]byte(pubsubTopic))
 var k DBKey
 k.raw = make([]byte, DBKeyLength)


@@ -9,6 +9,7 @@ import (
 "sync"
 "time"
+golog "github.com/ipfs/go-log/v2"
 "github.com/libp2p/go-libp2p"
 "go.uber.org/zap"
@@ -129,6 +130,7 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
 if params.logger == nil {
 params.logger = utils.Logger()
+golog.SetAllLoggers(params.logLevel)
 }
 if params.privKey == nil {


@@ -58,7 +58,8 @@ type WakuNodeParameters struct {
 wssPort int
 tlsConfig *tls.Config
 logger *zap.Logger
+logLevel logging.LogLevel
 noDefaultWakuTopic bool
 enableRelay bool
@@ -150,7 +151,8 @@ func WithLogger(l *zap.Logger) WakuNodeOption {
 // WithLogLevel is a WakuNodeOption that sets the log level for go-waku
 func WithLogLevel(lvl zapcore.Level) WakuNodeOption {
 return func(params *WakuNodeParameters) error {
-logging.SetAllLoggers(logging.LogLevel(lvl))
+params.logLevel = logging.LogLevel(lvl)
+logging.SetAllLoggers(params.logLevel)
 return nil
 }
 }
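Taken together with the node.go hunk above, WithLogLevel now records the level in WakuNodeParameters in addition to applying it, so New() can re-apply it through golog.SetAllLoggers when it falls back to the default logger. A minimal sketch of the call site (import paths assumed from the usual go-waku layout, not shown in this diff):

package main

import (
	"github.com/waku-org/go-waku/waku/v2/node"
	"go.uber.org/zap/zapcore"
)

func main() {
	// The option stores and applies the level; the logger itself stays the
	// default one unless WithLogger is also supplied.
	wakuNode, err := node.New(node.WithLogLevel(zapcore.DebugLevel))
	if err != nil {
		panic(err)
	}
	_ = wakuNode
}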


@@ -1,10 +1,9 @@ package protocol
 package protocol
 import (
-"crypto/sha256"
 wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
 "github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
+"github.com/waku-org/go-waku/waku/v2/utils"
 )
 // Envelope contains information about the pubsub topic of a WakuMessage
@@ -22,7 +21,7 @@ type Envelope struct {
 // as well as generating a hash based on the bytes that compose the message
 func NewEnvelope(msg *wpb.WakuMessage, receiverTime int64, pubSubTopic string) *Envelope {
 messageHash, dataLen, _ := msg.Hash()
-hash := sha256.Sum256(append([]byte(msg.ContentTopic), msg.Payload...))
+hash := utils.SHA256(append([]byte(msg.ContentTopic), msg.Payload...))
 return &Envelope{
 msg: msg,
 size: dataLen,


@ -0,0 +1,112 @@
package peer_exchange
import (
"bytes"
"context"
"math"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-msgio/pbio"
"github.com/waku-org/go-waku/waku/v2/metrics"
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)
func (wakuPX *WakuPeerExchange) Request(ctx context.Context, numPeers int, opts ...PeerExchangeOption) error {
params := new(PeerExchangeParameters)
params.host = wakuPX.h
params.log = wakuPX.log
optList := DefaultOptions(wakuPX.h)
optList = append(optList, opts...)
for _, opt := range optList {
opt(params)
}
if params.selectedPeer == "" {
metrics.RecordPeerExchangeError(ctx, "dialError")
return ErrNoPeersAvailable
}
requestRPC := &pb.PeerExchangeRPC{
Query: &pb.PeerExchangeQuery{
NumPeers: uint64(numPeers),
},
}
// We connect first so dns4 addresses are resolved (NewStream does not do it)
err := wakuPX.h.Connect(ctx, wakuPX.h.Peerstore().PeerInfo(params.selectedPeer))
if err != nil {
return err
}
connOpt, err := wakuPX.h.NewStream(ctx, params.selectedPeer, PeerExchangeID_v20alpha1)
if err != nil {
return err
}
defer connOpt.Close()
writer := pbio.NewDelimitedWriter(connOpt)
err = writer.WriteMsg(requestRPC)
if err != nil {
return err
}
reader := pbio.NewDelimitedReader(connOpt, math.MaxInt32)
responseRPC := &pb.PeerExchangeRPC{}
err = reader.ReadMsg(responseRPC)
if err != nil {
return err
}
return wakuPX.handleResponse(ctx, responseRPC.Response)
}
func (wakuPX *WakuPeerExchange) handleResponse(ctx context.Context, response *pb.PeerExchangeResponse) error {
var peers []peer.AddrInfo
for _, p := range response.PeerInfos {
enrRecord := &enr.Record{}
buf := bytes.NewBuffer(p.ENR)
err := enrRecord.DecodeRLP(rlp.NewStream(buf, uint64(len(p.ENR))))
if err != nil {
wakuPX.log.Error("converting bytes to enr", zap.Error(err))
return err
}
enodeRecord, err := enode.New(enode.ValidSchemes, enrRecord)
if err != nil {
wakuPX.log.Error("creating enode record", zap.Error(err))
return err
}
peerInfo, err := utils.EnodeToPeerInfo(enodeRecord)
if err != nil {
return err
}
peers = append(peers, *peerInfo)
}
if len(peers) != 0 {
wakuPX.log.Info("connecting to newly discovered peers", zap.Int("count", len(peers)))
wakuPX.wg.Add(1)
go func() {
defer wakuPX.wg.Done()
for _, p := range peers {
select {
case <-ctx.Done():
return
case wakuPX.peerConnector.PeerChannel() <- p:
}
}
}()
}
return nil
}
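A hypothetical call site for the request flow that now lives in this new client.go; wakuPX stands for an already-constructed *WakuPeerExchange, and the sketch assumes it sits inside the peer_exchange package with "context" and "time" imported:

func requestPeersExample(ctx context.Context, wakuPX *WakuPeerExchange) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	// DefaultOptions picks a peer that supports PeerExchangeID_v20alpha1; the
	// ENRs in the response are decoded by handleResponse and handed to the
	// peer connector channel for dialing.
	return wakuPX.Request(ctx, 10)
}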


@@ -11,10 +11,7 @@ import (
 "sync"
 "time"
-"github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/p2p/enode"
-"github.com/ethereum/go-ethereum/p2p/enr"
-"github.com/ethereum/go-ethereum/rlp"
 "github.com/libp2p/go-libp2p/core/host"
 "github.com/libp2p/go-libp2p/core/network"
 "github.com/libp2p/go-libp2p/core/peer"
@@ -96,51 +93,6 @@ func (wakuPX *WakuPeerExchange) Start(ctx context.Context) error {
 return nil
 }
-func (wakuPX *WakuPeerExchange) handleResponse(ctx context.Context, response *pb.PeerExchangeResponse) error {
-var peers []peer.AddrInfo
-for _, p := range response.PeerInfos {
-enrRecord := &enr.Record{}
-buf := bytes.NewBuffer(p.ENR)
-err := enrRecord.DecodeRLP(rlp.NewStream(buf, uint64(len(p.ENR))))
-if err != nil {
-wakuPX.log.Error("converting bytes to enr", zap.Error(err))
-return err
-}
-enodeRecord, err := enode.New(enode.ValidSchemes, enrRecord)
-if err != nil {
-wakuPX.log.Error("creating enode record", zap.Error(err))
-return err
-}
-peerInfo, err := utils.EnodeToPeerInfo(enodeRecord)
-if err != nil {
-return err
-}
-peers = append(peers, *peerInfo)
-}
-if len(peers) != 0 {
-log.Info("connecting to newly discovered peers", zap.Int("count", len(peers)))
-wakuPX.wg.Add(1)
-go func() {
-defer wakuPX.wg.Done()
-for _, p := range peers {
-select {
-case <-ctx.Done():
-return
-case wakuPX.peerConnector.PeerChannel() <- p:
-}
-}
-}()
-}
-return nil
-}
 func (wakuPX *WakuPeerExchange) onRequest(ctx context.Context) func(s network.Stream) {
 return func(s network.Stream) {
 defer s.Close()
@@ -156,19 +108,22 @@ func (wakuPX *WakuPeerExchange) onRequest(ctx context.Context) func(s network.St
 if requestRPC.Query != nil {
 logger.Info("request received")
-err := wakuPX.respond(ctx, requestRPC.Query.NumPeers, s.Conn().RemotePeer())
+records, err := wakuPX.getENRsFromCache(requestRPC.Query.NumPeers)
 if err != nil {
-logger.Error("responding", zap.Error(err))
+logger.Error("obtaining enrs from cache", zap.Error(err))
 metrics.RecordPeerExchangeError(ctx, "pxFailure")
 return
 }
-}
-if requestRPC.Response != nil {
-logger.Info("response received")
-err := wakuPX.handleResponse(ctx, requestRPC.Response)
+responseRPC := &pb.PeerExchangeRPC{}
+responseRPC.Response = new(pb.PeerExchangeResponse)
+responseRPC.Response.PeerInfos = records
+writer := pbio.NewDelimitedWriter(s)
+err = writer.WriteMsg(responseRPC)
 if err != nil {
-logger.Error("handling response", zap.Error(err))
+logger.Error("writing response", zap.Error(err))
 metrics.RecordPeerExchangeError(ctx, "pxFailure")
 return
 }
@@ -176,31 +131,6 @@ func (wakuPX *WakuPeerExchange) onRequest(ctx context.Context) func(s network.St
 }
 }
-func (wakuPX *WakuPeerExchange) Request(ctx context.Context, numPeers int, opts ...PeerExchangeOption) error {
-params := new(PeerExchangeParameters)
-params.host = wakuPX.h
-params.log = wakuPX.log
-optList := DefaultOptions(wakuPX.h)
-optList = append(optList, opts...)
-for _, opt := range optList {
-opt(params)
-}
-if params.selectedPeer == "" {
-metrics.RecordPeerExchangeError(ctx, "dialError")
-return ErrNoPeersAvailable
-}
-requestRPC := &pb.PeerExchangeRPC{
-Query: &pb.PeerExchangeQuery{
-NumPeers: uint64(numPeers),
-},
-}
-return wakuPX.sendPeerExchangeRPCToPeer(ctx, requestRPC, params.selectedPeer)
-}
 // Stop unmounts the peer exchange protocol
 func (wakuPX *WakuPeerExchange) Stop() {
 if wakuPX.cancel == nil {
@@ -212,46 +142,6 @@ func (wakuPX *WakuPeerExchange) Stop() {
 wakuPX.wg.Wait()
 }
-func (wakuPX *WakuPeerExchange) sendPeerExchangeRPCToPeer(ctx context.Context, rpc *pb.PeerExchangeRPC, peerID peer.ID) error {
-logger := wakuPX.log.With(logging.HostID("peer", peerID))
-// We connect first so dns4 addresses are resolved (NewStream does not do it)
-err := wakuPX.h.Connect(ctx, wakuPX.h.Peerstore().PeerInfo(peerID))
-if err != nil {
-logger.Error("connecting peer", zap.Error(err))
-return err
-}
-connOpt, err := wakuPX.h.NewStream(ctx, peerID, PeerExchangeID_v20alpha1)
-if err != nil {
-logger.Error("creating stream to peer", zap.Error(err))
-return err
-}
-defer connOpt.Close()
-writer := pbio.NewDelimitedWriter(connOpt)
-err = writer.WriteMsg(rpc)
-if err != nil {
-logger.Error("writing response", zap.Error(err))
-return err
-}
-return nil
-}
-func (wakuPX *WakuPeerExchange) respond(ctx context.Context, numPeers uint64, peerID peer.ID) error {
-records, err := wakuPX.getENRsFromCache(numPeers)
-if err != nil {
-return err
-}
-responseRPC := &pb.PeerExchangeRPC{}
-responseRPC.Response = new(pb.PeerExchangeResponse)
-responseRPC.Response.PeerInfos = records
-return wakuPX.sendPeerExchangeRPCToPeer(ctx, responseRPC, peerID)
-}
 func (wakuPX *WakuPeerExchange) getENRsFromCache(numPeers uint64) ([]*pb.PeerInfo, error) {
 wakuPX.enrCacheMutex.Lock()
 defer wakuPX.enrCacheMutex.Unlock()


@@ -2,7 +2,6 @@ package relay
 import (
 "context"
-"crypto/sha256"
 "encoding/hex"
 "errors"
 "fmt"
@@ -23,6 +22,7 @@ import (
 waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
 "github.com/waku-org/go-waku/waku/v2/protocol/pb"
 "github.com/waku-org/go-waku/waku/v2/timesource"
+"github.com/waku-org/go-waku/waku/v2/utils"
 )
 const WakuRelayID_v200 = protocol.ID("/vac/waku/relay/2.0.0")
@@ -52,8 +52,7 @@ type WakuRelay struct {
 }
 func msgIdFn(pmsg *pubsub_pb.Message) string {
-hash := sha256.Sum256(pmsg.Data)
-return string(hash[:])
+return string(utils.SHA256(pmsg.Data))
 }
 // NewWakuRelay returns a new instance of a WakuRelay struct


@ -0,0 +1,25 @@
package utils
import (
"crypto/sha256"
"hash"
"sync"
)
var sha256Pool = sync.Pool{New: func() interface{} {
return sha256.New()
}}
func SHA256(data []byte) []byte {
h, ok := sha256Pool.Get().(hash.Hash)
if !ok {
h = sha256.New()
}
defer sha256Pool.Put(h)
h.Reset()
var result [32]byte
h.Write(data)
h.Sum(result[:0])
return result[:]
}
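This new utils.SHA256 helper backs the dbkey, envelope and relay changes elsewhere in this commit: instead of sha256.Sum256 allocating fresh hash state on every message, hashers are reused through a sync.Pool. An equivalence sketch (illustrative payload):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"github.com/waku-org/go-waku/waku/v2/utils"
)

func main() {
	payload := []byte("waku message payload")

	direct := sha256.Sum256(payload) // [32]byte, fresh hash state per call
	pooled := utils.SHA256(payload)  // []byte, hash state reused via sync.Pool

	fmt.Println(bytes.Equal(direct[:], pooled)) // true: identical digests
}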


@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
+//go:build (386 || amd64 || amd64p32) && gccgo
 // +build 386 amd64 amd64p32
 // +build gccgo


@@ -4,6 +4,11 @@
 package cpu
+import (
+"strings"
+"syscall"
+)
 // HWCAP/HWCAP2 bits. These are exposed by Linux.
 const (
 hwcap_FP = 1 << 0
@@ -32,10 +37,45 @@ const (
 hwcap_ASIMDFHM = 1 << 23
 )
+// linuxKernelCanEmulateCPUID reports whether we're running
+// on Linux 4.11+. Ideally we'd like to ask the question about
+// whether the current kernel contains
+// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2
+// but the version number will have to do.
+func linuxKernelCanEmulateCPUID() bool {
+var un syscall.Utsname
+syscall.Uname(&un)
+var sb strings.Builder
+for _, b := range un.Release[:] {
+if b == 0 {
+break
+}
+sb.WriteByte(byte(b))
+}
+major, minor, _, ok := parseRelease(sb.String())
+return ok && (major > 4 || major == 4 && minor >= 11)
+}
 func doinit() {
 if err := readHWCAP(); err != nil {
-// failed to read /proc/self/auxv, try reading registers directly
-readARM64Registers()
+// We failed to read /proc/self/auxv. This can happen if the binary has
+// been given extra capabilities(7) with /bin/setcap.
+//
+// When this happens, we have two options. If the Linux kernel is new
+// enough (4.11+), we can read the arm64 registers directly which'll
+// trap into the kernel and then return back to userspace.
+//
+// But on older kernels, such as Linux 4.4.180 as used on many Synology
+// devices, calling readARM64Registers (specifically getisar0) will
+// cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo
+// instead.
+//
+// See golang/go#57336.
+if linuxKernelCanEmulateCPUID() {
+readARM64Registers()
+} else {
+readLinuxProcCPUInfo()
+}
 return
 }

vendor/golang.org/x/sys/cpu/endian_big.go generated vendored Normal file

@ -0,0 +1,11 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64
package cpu
// IsBigEndian records whether the GOARCH's byte order is big endian.
const IsBigEndian = true

vendor/golang.org/x/sys/cpu/endian_little.go generated vendored Normal file

@ -0,0 +1,11 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
package cpu
// IsBigEndian records whether the GOARCH's byte order is big endian.
const IsBigEndian = false

vendor/golang.org/x/sys/cpu/parse.go generated vendored Normal file

@ -0,0 +1,43 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import "strconv"
// parseRelease parses a dot-separated version number. It follows the semver
// syntax, but allows the minor and patch versions to be elided.
//
// This is a copy of the Go runtime's parseRelease from
// https://golang.org/cl/209597.
func parseRelease(rel string) (major, minor, patch int, ok bool) {
// Strip anything after a dash or plus.
for i := 0; i < len(rel); i++ {
if rel[i] == '-' || rel[i] == '+' {
rel = rel[:i]
break
}
}
next := func() (int, bool) {
for i := 0; i < len(rel); i++ {
if rel[i] == '.' {
ver, err := strconv.Atoi(rel[:i])
rel = rel[i+1:]
return ver, err == nil
}
}
ver, err := strconv.Atoi(rel)
rel = ""
return ver, err == nil
}
if major, ok = next(); !ok || rel == "" {
return
}
if minor, ok = next(); !ok || rel == "" {
return
}
patch, ok = next()
return
}
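A worked example of the parser above, mirroring how linuxKernelCanEmulateCPUID in cpu_arm64.go consumes it (the release string is illustrative and, since parseRelease is unexported, the sketch assumes it sits in package cpu):

func exampleParseRelease() {
	major, minor, patch, ok := parseRelease("4.4.180-synology")
	// -> major=4, minor=4, patch=180, ok=true; anything after '-' or '+' is ignored.
	canEmulateCPUID := ok && (major > 4 || major == 4 && minor >= 11)
	_ = patch
	_ = canEmulateCPUID // false for this 4.4 kernel, so /proc/cpuinfo is parsed instead
}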

vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go generated vendored Normal file

@ -0,0 +1,54 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && arm64
// +build linux,arm64
package cpu
import (
"errors"
"io"
"os"
"strings"
)
func readLinuxProcCPUInfo() error {
f, err := os.Open("/proc/cpuinfo")
if err != nil {
return err
}
defer f.Close()
var buf [1 << 10]byte // enough for first CPU
n, err := io.ReadFull(f, buf[:])
if err != nil && err != io.ErrUnexpectedEOF {
return err
}
in := string(buf[:n])
const features = "\nFeatures : "
i := strings.Index(in, features)
if i == -1 {
return errors.New("no CPU features found")
}
in = in[i+len(features):]
if i := strings.Index(in, "\n"); i != -1 {
in = in[:i]
}
m := map[string]*bool{}
initOptions() // need it early here; it's harmless to call twice
for _, o := range options {
m[o.Name] = o.Feature
}
// The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm".
m["evtstrm"] = &ARM64.HasEVTSTRM
for _, f := range strings.Fields(in) {
if p, ok := m[f]; ok {
*p = true
}
}
return nil
}

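However doinit ends up populating them (the ID registers or the /proc/cpuinfo fallback above), callers only ever see the exported feature flags. A typical, hedged consumer looks like the sketch below; HasAES, HasSHA2 and HasEVTSTRM are existing fields of cpu.ARM64, and on other architectures they are simply false.

// Example consumer of the arm64 feature flags filled in by the code above.
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	if cpu.ARM64.HasAES && cpu.ARM64.HasSHA2 {
		fmt.Println("using hardware-accelerated crypto path")
	} else {
		fmt.Println("falling back to the generic implementation")
	}
	fmt.Println("event stream (evtstrm):", cpu.ARM64.HasEVTSTRM)
}
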
@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-//go:build gccgo && !aix
-// +build gccgo,!aix
+//go:build gccgo && !aix && !hurd
+// +build gccgo,!aix,!hurd
 package unix

@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-// +build gccgo
-// +build !aix
+//go:build gccgo && !aix && !hurd
+// +build gccgo,!aix,!hurd
 #include <errno.h>
 #include <stdint.h>

@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris
+// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris
 package unix

@ -174,10 +174,10 @@ openbsd_arm64)
 	mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
 	;;
 openbsd_mips64)
+	mkasm="go run mkasm.go"
 	mkerrors="$mkerrors -m64"
-	mksyscall="go run mksyscall.go -openbsd"
+	mksyscall="go run mksyscall.go -openbsd -libc"
 	mksysctl="go run mksysctl_openbsd.go"
-	mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
 	# Let the type of C char be signed for making the bare syscall
 	# API consistent across platforms.
 	mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"

@ -230,6 +230,7 @@ func direntNamlen(buf []byte) (uint64, bool) {
 func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) }
 func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) }
+func PtraceDenyAttach() (err error) { return ptrace(PT_DENY_ATTACH, 0, 0, 0) }
 //sysnb	pipe(p *[2]int32) (err error)

@ -255,6 +255,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys	Chmod(path string, mode uint32) (err error)
 //sys	Chown(path string, uid int, gid int) (err error)
 //sys	Chroot(path string) (err error)
+//sys	ClockGettime(clockid int32, time *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	Dup(fd int) (nfd int, err error)
 //sys	Dup2(from int, to int) (err error)

@ -319,6 +319,7 @@ func PtraceSingleStep(pid int) (err error) {
 //sys	Chmod(path string, mode uint32) (err error)
 //sys	Chown(path string, uid int, gid int) (err error)
 //sys	Chroot(path string) (err error)
+//sys	ClockGettime(clockid int32, time *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	Dup(fd int) (nfd int, err error)
 //sys	Dup2(from int, to int) (err error)

@ -60,8 +60,13 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
 	return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
 }
-func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
+func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
+	ioDesc := PtraceIoDesc{
+		Op:   int32(req),
+		Offs: offs,
+		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
+		Len:  uint32(countin),
+	}
 	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
 	return int(ioDesc.Len), err
 }

@ -60,8 +60,13 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
 	return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
 }
-func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
+func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
+	ioDesc := PtraceIoDesc{
+		Op:   int32(req),
+		Offs: offs,
+		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
+		Len:  uint64(countin),
+	}
 	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
 	return int(ioDesc.Len), err
 }

@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
+func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
+	ioDesc := PtraceIoDesc{
+		Op:   int32(req),
+		Offs: offs,
+		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
+		Len:  uint32(countin),
+	}
 	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
 	return int(ioDesc.Len), err
 }

@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
+func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
+	ioDesc := PtraceIoDesc{
+		Op:   int32(req),
+		Offs: offs,
+		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
+		Len:  uint64(countin),
+	}
 	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
 	return int(ioDesc.Len), err
 }

@ -56,8 +56,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
-	ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
+func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) {
+	ioDesc := PtraceIoDesc{
+		Op:   int32(req),
+		Offs: offs,
+		Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe.
+		Len:  uint64(countin),
+	}
 	err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
 	return int(ioDesc.Len), err
 }

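All five PtraceIO variants above now take the tracee offset directly instead of laundering it through unsafe.Pointer. A hedged usage sketch on FreeBSD follows; PIOD_READ_D is assumed to be the package's existing PT_IO read-data op constant, the pid and address are hypothetical, and error handling is trimmed.

// Sketch: read n bytes from a stopped tracee's data space at addr (FreeBSD only).
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func peek(pid int, addr uintptr, n int) ([]byte, error) {
	buf := make([]byte, n)
	count, err := unix.PtraceIO(unix.PIOD_READ_D, pid, addr, buf, len(buf))
	if err != nil {
		return nil, err
	}
	return buf[:count], nil
}

func main() {
	data, err := peek(1234 /* hypothetical pid */, 0x1000, 16)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", data)
}
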
vendor/golang.org/x/sys/unix/syscall_hurd.go (generated, vendored, new file)

@ -0,0 +1,22 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build hurd
// +build hurd
package unix
/*
#include <stdint.h>
int ioctl(int, unsigned long int, uintptr_t);
*/
import "C"
func ioctl(fd int, req uint, arg uintptr) (err error) {
r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))
if r0 == -1 && er != nil {
err = er
}
return
}

vendor/golang.org/x/sys/unix/syscall_hurd_386.go (generated, vendored, new file)

@ -0,0 +1,29 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 && hurd
// +build 386,hurd
package unix
const (
TIOCGETA = 0x62251713
)
type Winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed int32
Ospeed int32
}

@ -1800,6 +1800,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sysnb	Capset(hdr *CapUserHeader, data *CapUserData) (err error)
 //sys	Chdir(path string) (err error)
 //sys	Chroot(path string) (err error)
+//sys	ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
 //sys	ClockGetres(clockid int32, res *Timespec) (err error)
 //sys	ClockGettime(clockid int32, time *Timespec) (err error)
 //sys	ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
@ -1973,36 +1974,46 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) {
 //sys	preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PREADV2
 //sys	pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2
-func bytes2iovec(bs [][]byte) []Iovec {
-	iovecs := make([]Iovec, len(bs))
-	for i, b := range bs {
-		iovecs[i].SetLen(len(b))
+// minIovec is the size of the small initial allocation used by
+// Readv, Writev, etc.
+//
+// This small allocation gets stack allocated, which lets the
+// common use case of len(iovs) <= minIovs avoid more expensive
+// heap allocations.
+const minIovec = 8
+
+// appendBytes converts bs to Iovecs and appends them to vecs.
+func appendBytes(vecs []Iovec, bs [][]byte) []Iovec {
+	for _, b := range bs {
+		var v Iovec
+		v.SetLen(len(b))
 		if len(b) > 0 {
-			iovecs[i].Base = &b[0]
+			v.Base = &b[0]
 		} else {
-			iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero))
+			v.Base = (*byte)(unsafe.Pointer(&_zero))
 		}
+		vecs = append(vecs, v)
 	}
-	return iovecs
+	return vecs
 }
-// offs2lohi splits offs into its lower and upper unsigned long. On 64-bit
-// systems, hi will always be 0. On 32-bit systems, offs will be split in half.
-// preadv/pwritev chose this calling convention so they don't need to add a
-// padding-register for alignment on ARM.
+// offs2lohi splits offs into its low and high order bits.
 func offs2lohi(offs int64) (lo, hi uintptr) {
-	return uintptr(offs), uintptr(uint64(offs) >> SizeofLong)
+	const longBits = SizeofLong * 8
+	return uintptr(offs), uintptr(uint64(offs) >> (longBits - 1) >> 1) // two shifts to avoid false positive in vet
 }
 func Readv(fd int, iovs [][]byte) (n int, err error) {
-	iovecs := bytes2iovec(iovs)
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
 	n, err = readv(fd, iovecs)
 	readvRacedetect(iovecs, n, err)
 	return n, err
 }
 func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
-	iovecs := bytes2iovec(iovs)
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
 	lo, hi := offs2lohi(offset)
 	n, err = preadv(fd, iovecs, lo, hi)
 	readvRacedetect(iovecs, n, err)
@ -2010,7 +2021,8 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
 }
 func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) {
-	iovecs := bytes2iovec(iovs)
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
 	lo, hi := offs2lohi(offset)
 	n, err = preadv2(fd, iovecs, lo, hi, flags)
 	readvRacedetect(iovecs, n, err)
@ -2037,7 +2049,8 @@ func readvRacedetect(iovecs []Iovec, n int, err error) {
 }
 func Writev(fd int, iovs [][]byte) (n int, err error) {
-	iovecs := bytes2iovec(iovs)
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
 	if raceenabled {
 		raceReleaseMerge(unsafe.Pointer(&ioSync))
 	}
@ -2047,7 +2060,8 @@ func Writev(fd int, iovs [][]byte) (n int, err error) {
 }
 func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
-	iovecs := bytes2iovec(iovs)
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
 	if raceenabled {
 		raceReleaseMerge(unsafe.Pointer(&ioSync))
 	}
@ -2058,7 +2072,8 @@ func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
 }
 func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) {
-	iovecs := bytes2iovec(iovs)
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
 	if raceenabled {
 		raceReleaseMerge(unsafe.Pointer(&ioSync))
 	}

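The rewritten offs2lohi above splits a 64-bit offset into the low and high unsigned-long halves that preadv2/pwritev2 expect; on 64-bit platforms the high word is always zero, and the double shift only exists so a constant 64-bit shift does not trip `go vet`. A standalone sketch of the same arithmetic, with the long size passed in explicitly just for illustration:

// Demonstrates the offs2lohi split for 32-bit and 64-bit "long" sizes.
// Standalone illustration; the vendored code uses the platform's SizeofLong.
package main

import "fmt"

func offs2lohi(offs int64, sizeofLong uint) (lo, hi uint64) {
	longBits := sizeofLong * 8
	// Mask off the low word; the double shift mirrors the vendored helper,
	// where it avoids a vet warning about shifting a 64-bit value by 64.
	return uint64(offs) & (1<<longBits - 1), uint64(offs) >> (longBits - 1) >> 1
}

func main() {
	const offs = int64(0x1_2345_6789)
	lo32, hi32 := offs2lohi(offs, 4)
	lo64, hi64 := offs2lohi(offs, 8)
	fmt.Printf("32-bit long: lo=%#x hi=%#x\n", lo32, hi32) // lo=0x23456789 hi=0x1
	fmt.Printf("64-bit long: lo=%#x hi=%#x\n", lo64, hi64) // lo=0x123456789 hi=0x0
}
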
@ -110,6 +110,20 @@ func direntNamlen(buf []byte) (uint64, bool) {
 	return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
 }
+func SysctlUvmexp(name string) (*Uvmexp, error) {
+	mib, err := sysctlmib(name)
+	if err != nil {
+		return nil, err
+	}
+	n := uintptr(SizeofUvmexp)
+	var u Uvmexp
+	if err := sysctl(mib, (*byte)(unsafe.Pointer(&u)), &n, nil, 0); err != nil {
+		return nil, err
+	}
+	return &u, nil
+}
 func Pipe(p []int) (err error) {
 	return Pipe2(p, 0)
 }
@ -245,6 +259,7 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
 //sys	Chmod(path string, mode uint32) (err error)
 //sys	Chown(path string, uid int, gid int) (err error)
 //sys	Chroot(path string) (err error)
+//sys	ClockGettime(clockid int32, time *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	Dup(fd int) (nfd int, err error)
 //sys	Dup2(from int, to int) (err error)

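SysctlUvmexp wraps the BSD three-argument sysctl(2) form for a fixed-size struct. A hedged usage sketch on OpenBSD/NetBSD follows; "vm.uvmexp" is the conventional MIB name, and the Uvmexp field names shown (Pagesize, Free) are assumptions about the generated struct and may need adjusting.

// Sketch: query UVM memory statistics via the new helper (OpenBSD/NetBSD only).
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	u, err := unix.SysctlUvmexp("vm.uvmexp")
	if err != nil {
		log.Fatal(err)
	}
	// Field names assumed from the generated Uvmexp type.
	fmt.Printf("page size: %d, pages free: %d\n", u.Pagesize, u.Free)
}
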
@ -220,6 +220,7 @@ func Uname(uname *Utsname) error {
 //sys	Chmod(path string, mode uint32) (err error)
 //sys	Chown(path string, uid int, gid int) (err error)
 //sys	Chroot(path string) (err error)
+//sys	ClockGettime(clockid int32, time *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	Dup(fd int) (nfd int, err error)
 //sys	Dup2(from int, to int) (err error)

@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-//go:build openbsd && !mips64
-// +build openbsd,!mips64
+//go:build openbsd
+// +build openbsd
 package unix

@ -590,6 +590,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys	Chmod(path string, mode uint32) (err error)
 //sys	Chown(path string, uid int, gid int) (err error)
 //sys	Chroot(path string) (err error)
+//sys	ClockGettime(clockid int32, time *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	Creat(path string, mode uint32) (fd int, err error)
 //sys	Dup(fd int) (nfd int, err error)

@ -331,6 +331,19 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
 	return
 }
+// Recvmsg receives a message from a socket using the recvmsg system call. The
+// received non-control data will be written to p, and any "out of band"
+// control data will be written to oob. The flags are passed to recvmsg.
+//
+// The results are:
+//   - n is the number of non-control data bytes read into p
+//   - oobn is the number of control data bytes read into oob; this may be interpreted using [ParseSocketControlMessage]
+//   - recvflags is flags returned by recvmsg
+//   - from is the address of the sender
+//
+// If the underlying socket type is not SOCK_DGRAM, a received message
+// containing oob data and a single '\0' of non-control data is treated as if
+// the message contained only control data, i.e. n will be zero on return.
 func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
 	var iov [1]Iovec
 	if len(p) > 0 {
@ -346,13 +359,9 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
 	return
 }
-// RecvmsgBuffers receives a message from a socket using the recvmsg
-// system call. The flags are passed to recvmsg. Any non-control data
-// read is scattered into the buffers slices. The results are:
-//   - n is the number of non-control data read into bufs
-//   - oobn is the number of control data read into oob; this may be interpreted using [ParseSocketControlMessage]
-//   - recvflags is flags returned by recvmsg
-//   - from is the address of the sender
+// RecvmsgBuffers receives a message from a socket using the recvmsg system
+// call. This function is equivalent to Recvmsg, but non-control data read is
+// scattered into the buffers slices.
 func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
 	iov := make([]Iovec, len(buffers))
 	for i := range buffers {
@ -371,11 +380,38 @@ func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn in
 	return
 }
+// Sendmsg sends a message on a socket to an address using the sendmsg system
+// call. This function is equivalent to SendmsgN, but does not return the
+// number of bytes actually sent.
 func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
 	_, err = SendmsgN(fd, p, oob, to, flags)
 	return
 }
+// SendmsgN sends a message on a socket to an address using the sendmsg system
+// call. p contains the non-control data to send, and oob contains the "out of
+// band" control data. The flags are passed to sendmsg. The number of
+// non-control bytes actually written to the socket is returned.
+//
+// Some socket types do not support sending control data without accompanying
+// non-control data. If p is empty, and oob contains control data, and the
+// underlying socket type is not SOCK_DGRAM, p will be treated as containing a
+// single '\0' and the return value will indicate zero bytes sent.
+//
+// The Go function Recvmsg, if called with an empty p and a non-empty oob,
+// will read and ignore this additional '\0'. If the message is received by
+// code that does not use Recvmsg, or that does not use Go at all, that code
+// will need to be written to expect and ignore the additional '\0'.
+//
+// If you need to send non-empty oob with p actually empty, and if the
+// underlying socket type supports it, you can do so via a raw system call as
+// follows:
+//
+//	msg := &unix.Msghdr{
+//		Control: &oob[0],
+//	}
+//	msg.SetControllen(len(oob))
+//	n, _, errno := unix.Syscall(unix.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(msg)), flags)
 func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
 	var iov [1]Iovec
 	if len(p) > 0 {
@ -394,9 +430,8 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
 }
 // SendmsgBuffers sends a message on a socket to an address using the sendmsg
-// system call. The flags are passed to sendmsg. Any non-control data written
-// is gathered from buffers. The function returns the number of bytes written
-// to the socket.
+// system call. This function is equivalent to SendmsgN, but the non-control
+// data is gathered from buffers.
 func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) {
 	iov := make([]Iovec, len(buffers))
 	for i := range buffers {
@ -543,7 +578,7 @@ func Lutimes(path string, tv []Timeval) error {
 	return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW)
 }
-// emptyIovec reports whether there are no bytes in the slice of Iovec.
+// emptyIovecs reports whether there are no bytes in the slice of Iovec.
 func emptyIovecs(iov []Iovec) bool {
 	for i := range iov {
 		if iov[i].Len > 0 {

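The new doc comments spell out the control-message semantics; in practice the Sendmsg/Recvmsg pairing looks like the hedged sketch below, which passes an open file descriptor over a Unix-domain socketpair using existing golang.org/x/sys/unix helpers (UnixRights, CmsgSpace, ParseSocketControlMessage, ParseUnixRights). Unix-like systems only.

// Sketch: send an fd as SCM_RIGHTS ancillary data and read it back.
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	f, err := os.Open(os.DevNull) // any open descriptor to pass
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	oob := unix.UnixRights(int(f.Fd()))
	if _, err := unix.SendmsgN(fds[0], []byte("x"), oob, nil, 0); err != nil {
		log.Fatal(err)
	}

	buf := make([]byte, 1)
	oobBuf := make([]byte, unix.CmsgSpace(4))
	_, oobn, _, _, err := unix.Recvmsg(fds[1], buf, oobBuf, 0)
	if err != nil {
		log.Fatal(err)
	}
	msgs, err := unix.ParseSocketControlMessage(oobBuf[:oobn])
	if err != nil {
		log.Fatal(err)
	}
	passed, err := unix.ParseUnixRights(&msgs[0])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("received fd:", passed[0])
}
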
@ -9,7 +9,7 @@ package unix
 import "time"
-// TimespecToNSec returns the time stored in ts as nanoseconds.
+// TimespecToNsec returns the time stored in ts as nanoseconds.
 func TimespecToNsec(ts Timespec) int64 { return ts.Nano() }
 // NsecToTimespec converts a number of nanoseconds into a Timespec.

@ -36,9 +36,14 @@ func xattrnamespace(fullattr string) (ns int, attr string, err error) {
 func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) {
 	if len(dest) > idx {
 		return unsafe.Pointer(&dest[idx])
-	} else {
-		return unsafe.Pointer(_zero)
 	}
+	if dest != nil {
+		// extattr_get_file and extattr_list_file treat NULL differently from
+		// a non-NULL pointer of length zero. Preserve the property of nilness,
+		// even if we can't use dest directly.
+		return unsafe.Pointer(&_zero)
+	}
+	return nil
 }
 // FreeBSD and NetBSD implement their own syscalls to handle extended attributes

@ -457,7 +457,6 @@
 	B600 = 0x8
 	B75 = 0x2
 	B9600 = 0xd
-	BALLOON_KVM_MAGIC = 0x13661366
 	BDEVFS_MAGIC = 0x62646576
 	BINDERFS_SUPER_MAGIC = 0x6c6f6f70
 	BINFMTFS_MAGIC = 0x42494e4d
@ -563,6 +562,7 @@
 	BUS_USB = 0x3
 	BUS_VIRTUAL = 0x6
 	CAN_BCM = 0x2
+	CAN_BUS_OFF_THRESHOLD = 0x100
 	CAN_CTRLMODE_3_SAMPLES = 0x4
 	CAN_CTRLMODE_BERR_REPORTING = 0x10
 	CAN_CTRLMODE_CC_LEN8_DLC = 0x100
@ -577,9 +577,12 @@
 	CAN_EFF_FLAG = 0x80000000
 	CAN_EFF_ID_BITS = 0x1d
 	CAN_EFF_MASK = 0x1fffffff
+	CAN_ERROR_PASSIVE_THRESHOLD = 0x80
+	CAN_ERROR_WARNING_THRESHOLD = 0x60
 	CAN_ERR_ACK = 0x20
 	CAN_ERR_BUSERROR = 0x80
 	CAN_ERR_BUSOFF = 0x40
+	CAN_ERR_CNT = 0x200
 	CAN_ERR_CRTL = 0x4
 	CAN_ERR_CRTL_ACTIVE = 0x40
 	CAN_ERR_CRTL_RX_OVERFLOW = 0x1
@ -820,9 +823,9 @@
 	DM_UUID_FLAG = 0x4000
 	DM_UUID_LEN = 0x81
 	DM_VERSION = 0xc138fd00
-	DM_VERSION_EXTRA = "-ioctl (2022-02-22)"
+	DM_VERSION_EXTRA = "-ioctl (2022-07-28)"
 	DM_VERSION_MAJOR = 0x4
-	DM_VERSION_MINOR = 0x2e
+	DM_VERSION_MINOR = 0x2f
 	DM_VERSION_PATCHLEVEL = 0x0
 	DT_BLK = 0x6
 	DT_CHR = 0x2
@ -1049,6 +1052,7 @@
 	ETH_P_CAIF = 0xf7
 	ETH_P_CAN = 0xc
 	ETH_P_CANFD = 0xd
+	ETH_P_CANXL = 0xe
 	ETH_P_CFM = 0x8902
 	ETH_P_CONTROL = 0x16
 	ETH_P_CUST = 0x6006
@ -1060,6 +1064,7 @@
 	ETH_P_DNA_RT = 0x6003
 	ETH_P_DSA = 0x1b
 	ETH_P_DSA_8021Q = 0xdadb
+	ETH_P_DSA_A5PSW = 0xe001
 	ETH_P_ECONET = 0x18
 	ETH_P_EDSA = 0xdada
 	ETH_P_ERSPAN = 0x88be
@ -1194,8 +1199,10 @@
 	FAN_MARK_EVICTABLE = 0x200
 	FAN_MARK_FILESYSTEM = 0x100
 	FAN_MARK_FLUSH = 0x80
+	FAN_MARK_IGNORE = 0x400
 	FAN_MARK_IGNORED_MASK = 0x20
 	FAN_MARK_IGNORED_SURV_MODIFY = 0x40
+	FAN_MARK_IGNORE_SURV = 0x440
 	FAN_MARK_INODE = 0x0
 	FAN_MARK_MOUNT = 0x10
 	FAN_MARK_ONLYDIR = 0x8
@ -1253,6 +1260,7 @@
 	FSCRYPT_MODE_AES_128_CBC = 0x5
 	FSCRYPT_MODE_AES_128_CTS = 0x6
 	FSCRYPT_MODE_AES_256_CTS = 0x4
+	FSCRYPT_MODE_AES_256_HCTR2 = 0xa
 	FSCRYPT_MODE_AES_256_XTS = 0x1
 	FSCRYPT_POLICY_FLAGS_PAD_16 = 0x2
 	FSCRYPT_POLICY_FLAGS_PAD_32 = 0x3
@ -1430,6 +1438,7 @@
 	IFF_NOARP = 0x80
 	IFF_NOFILTER = 0x1000
 	IFF_NOTRAILERS = 0x20
+	IFF_NO_CARRIER = 0x40
 	IFF_NO_PI = 0x1000
 	IFF_ONE_QUEUE = 0x2000
 	IFF_PERSIST = 0x800
@ -1805,6 +1814,7 @@
 	MADV_DONTDUMP = 0x10
 	MADV_DONTFORK = 0xa
 	MADV_DONTNEED = 0x4
+	MADV_DONTNEED_LOCKED = 0x18
 	MADV_FREE = 0x8
 	MADV_HUGEPAGE = 0xe
 	MADV_HWPOISON = 0x64
@ -1846,7 +1856,7 @@
 	MFD_ALLOW_SEALING = 0x2
 	MFD_CLOEXEC = 0x1
 	MFD_HUGETLB = 0x4
-	MFD_HUGE_16GB = -0x78000000
+	MFD_HUGE_16GB = 0x88000000
 	MFD_HUGE_16MB = 0x60000000
 	MFD_HUGE_1GB = 0x78000000
 	MFD_HUGE_1MB = 0x50000000
@ -2212,6 +2222,11 @@
 	PERF_AUX_FLAG_PARTIAL = 0x4
 	PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00
 	PERF_AUX_FLAG_TRUNCATED = 0x1
+	PERF_BR_ARM64_DEBUG_DATA = 0x7
+	PERF_BR_ARM64_DEBUG_EXIT = 0x5
+	PERF_BR_ARM64_DEBUG_HALT = 0x4
+	PERF_BR_ARM64_DEBUG_INST = 0x6
+	PERF_BR_ARM64_FIQ = 0x3
 	PERF_FLAG_FD_CLOEXEC = 0x8
 	PERF_FLAG_FD_NO_GROUP = 0x1
 	PERF_FLAG_FD_OUTPUT = 0x2
@ -2232,6 +2247,8 @@
 	PERF_MEM_LOCK_NA = 0x1
 	PERF_MEM_LOCK_SHIFT = 0x18
 	PERF_MEM_LVLNUM_ANY_CACHE = 0xb
+	PERF_MEM_LVLNUM_CXL = 0x9
+	PERF_MEM_LVLNUM_IO = 0xa
 	PERF_MEM_LVLNUM_L1 = 0x1
 	PERF_MEM_LVLNUM_L2 = 0x2
 	PERF_MEM_LVLNUM_L3 = 0x3
@ -2265,6 +2282,7 @@
 	PERF_MEM_REMOTE_REMOTE = 0x1
 	PERF_MEM_REMOTE_SHIFT = 0x25
 	PERF_MEM_SNOOPX_FWD = 0x1
+	PERF_MEM_SNOOPX_PEER = 0x2
 	PERF_MEM_SNOOPX_SHIFT = 0x26
 	PERF_MEM_SNOOP_HIT = 0x4
 	PERF_MEM_SNOOP_HITM = 0x10
@ -2301,7 +2319,6 @@
 	PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
 	PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
 	PIPEFS_MAGIC = 0x50495045
-	PPC_CMM_MAGIC = 0xc7571590
 	PPPIOCGNPMODE = 0xc008744c
 	PPPIOCNEWUNIT = 0xc004743e
 	PRIO_PGRP = 0x1
@ -2999,6 +3016,7 @@
 	STATX_BLOCKS = 0x400
 	STATX_BTIME = 0x800
 	STATX_CTIME = 0x80
+	STATX_DIOALIGN = 0x2000
 	STATX_GID = 0x10
 	STATX_INO = 0x100
 	STATX_MNT_ID = 0x1000
@ -3392,9 +3410,7 @@
 	XDP_ZEROCOPY = 0x4
 	XENFS_SUPER_MAGIC = 0xabba1974
 	XFS_SUPER_MAGIC = 0x58465342
-	Z3FOLD_MAGIC = 0x33
 	ZONEFS_MAGIC = 0x5a4f4653
-	ZSMALLOC_MAGIC = 0x58295829
 	_HIDIOCGRAWNAME_LEN = 0x80
 	_HIDIOCGRAWPHYS_LEN = 0x40
 	_HIDIOCGRAWUNIQ_LEN = 0x40

@ -133,6 +133,7 @@
 	MEMGETREGIONCOUNT = 0x80044d07
 	MEMISLOCKED = 0x80084d17
 	MEMLOCK = 0x40084d05
+	MEMREAD = 0xc03c4d1a
 	MEMREADOOB = 0xc00c4d04
 	MEMSETBADBLOCK = 0x40084d0c
 	MEMUNLOCK = 0x40084d06

@ -133,6 +133,7 @@
 	MEMGETREGIONCOUNT = 0x80044d07
 	MEMISLOCKED = 0x80084d17
 	MEMLOCK = 0x40084d05
+	MEMREAD = 0xc0404d1a
 	MEMREADOOB = 0xc0104d04
 	MEMSETBADBLOCK = 0x40084d0c
 	MEMUNLOCK = 0x40084d06

@ -131,6 +131,7 @@
 	MEMGETREGIONCOUNT = 0x80044d07
 	MEMISLOCKED = 0x80084d17
 	MEMLOCK = 0x40084d05
+	MEMREAD = 0xc0404d1a
 	MEMREADOOB = 0xc00c4d04
 	MEMSETBADBLOCK = 0x40084d0c
 	MEMUNLOCK = 0x40084d06

@ -134,6 +134,7 @@
 	MEMGETREGIONCOUNT = 0x80044d07
 	MEMISLOCKED = 0x80084d17
 	MEMLOCK = 0x40084d05
+	MEMREAD = 0xc0404d1a
 	MEMREADOOB = 0xc0104d04
 	MEMSETBADBLOCK = 0x40084d0c
 	MEMUNLOCK = 0x40084d06

@ -132,6 +132,7 @@
 	MEMGETREGIONCOUNT = 0x80044d07
 	MEMISLOCKED = 0x80084d17
 	MEMLOCK = 0x40084d05
+	MEMREAD = 0xc0404d1a
 	MEMREADOOB = 0xc0104d04
 	MEMSETBADBLOCK = 0x40084d0c
 	MEMUNLOCK = 0x40084d06

@ -131,6 +131,7 @@
 	MEMGETREGIONCOUNT = 0x40044d07
 	MEMISLOCKED = 0x40084d17
 	MEMLOCK = 0x80084d05
+	MEMREAD = 0xc0404d1a
 	MEMREADOOB = 0xc00c4d04
 	MEMSETBADBLOCK = 0x80084d0c
 	MEMUNLOCK = 0x80084d06

@ -131,6 +131,7 @@
 	MEMGETREGIONCOUNT = 0x40044d07
 	MEMISLOCKED = 0x40084d17
 	MEMLOCK = 0x80084d05
+	MEMREAD = 0xc0404d1a
 	MEMREADOOB = 0xc0104d04
 	MEMSETBADBLOCK = 0x80084d0c
 	MEMUNLOCK = 0x80084d06

@ -131,6 +131,7 @@
 	MEMGETREGIONCOUNT = 0x40044d07
 	MEMISLOCKED = 0x40084d17
 	MEMLOCK = 0x80084d05
+	MEMREAD = 0xc0404d1a
 	MEMREADOOB = 0xc0104d04
 	MEMSETBADBLOCK = 0x80084d0c
 	MEMUNLOCK = 0x80084d06

Some files were not shown because too many files have changed in this diff.