Merge pull request #8372 from hashicorp/update-go-metrics

vendor: Update github.com/armon/go-metrics to v0.3.3
commit bd8d3afc6a
Kyle Havlovitz, 2020-07-23 12:28:15 -07:00, committed by GitHub
102 changed files with 9765 additions and 1358 deletions

go.mod (6 changed lines)

@@ -13,7 +13,7 @@ require (
github.com/NYTimes/gziphandler v1.0.1
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878
github.com/armon/go-metrics v0.3.3
github.com/armon/go-radix v1.0.0
github.com/aws/aws-sdk-go v1.25.41
github.com/coredns/coredns v1.1.2
@@ -37,6 +37,7 @@ require (
github.com/hashicorp/go-connlimit v0.3.0
github.com/hashicorp/go-discover v0.0.0-20200501174627-ad1e96bde088
github.com/hashicorp/go-hclog v0.12.0
github.com/hashicorp/go-immutable-radix v1.2.0 // indirect
github.com/hashicorp/go-memdb v1.1.0
github.com/hashicorp/go-msgpack v0.5.5
github.com/hashicorp/go-multierror v1.1.0
@@ -70,11 +71,10 @@ require (
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.8.1
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
github.com/prometheus/client_golang v1.0.0
github.com/prometheus/client_golang v1.4.0
github.com/rboyer/safeio v0.2.1
github.com/ryanuber/columnize v2.1.0+incompatible
github.com/shirou/gopsutil v2.20.6-0.20200630091542-01afd763e6c0+incompatible
github.com/sirupsen/logrus v1.4.2 // indirect
github.com/stretchr/testify v1.5.1
go.opencensus.io v0.22.0 // indirect
go.uber.org/goleak v1.0.0
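(For reference, a bump like this is normally produced by Go's module tooling rather than by hand: something like `go get github.com/armon/go-metrics@v0.3.3` followed by `go mod tidy` and `go mod vendor` rewrites go.mod, go.sum and the vendor tree together, which is why this PR touches all three at once.)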

go.sum (31 changed lines)

@@ -36,6 +36,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Microsoft/go-winio v0.4.3 h1:M3NHMuPgMSUPdE5epwNUHlRPSVzHs8HpRTrVXhR0myo=
github.com/Microsoft/go-winio v0.4.3/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -49,13 +51,17 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro=
github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@@ -64,13 +70,18 @@ github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
@@ -130,6 +141,7 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-check/check v0.0.0-20140225173054-eb6ee6f84d0a/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
@@ -169,6 +181,9 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -217,6 +232,8 @@ github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8=
github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-memdb v1.1.0 h1:ClvpUXpBA6UDs5+vc1h3wqe4UJU+rwum7CU219SeCbk=
github.com/hashicorp/go-memdb v1.1.0/go.mod h1:LWQ8R70vPrS4OEY9k28D2z8/Zzyu34NVzeRibGAzHO0=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
@@ -299,6 +316,8 @@ github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBv
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@@ -409,21 +428,29 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU=
github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig=
@@ -535,6 +562,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
@@ -580,6 +608,7 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
@@ -663,6 +692,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -2,4 +2,3 @@
Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags
and histograms.

vendor/github.com/DataDog/datadog-go/statsd/buffer.go (130 changed lines; generated, vendored, new file)

@@ -0,0 +1,130 @@
package statsd
type bufferFullError string
func (e bufferFullError) Error() string { return string(e) }
const errBufferFull = bufferFullError("statsd buffer is full")
const metricOverhead = 512
// statsdBuffer is a buffer containing statsd messages
// this struct's methods are NOT safe for concurrent use
type statsdBuffer struct {
buffer []byte
maxSize int
maxElements int
elementCount int
}
func newStatsdBuffer(maxSize, maxElements int) *statsdBuffer {
return &statsdBuffer{
buffer: make([]byte, 0, maxSize+metricOverhead), // pre-allocate the needed size + metricOverhead to avoid having Go re-allocate on its own if an element does not fit
maxSize: maxSize,
maxElements: maxElements,
}
}
func (b *statsdBuffer) writeGauge(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
if b.elementCount >= b.maxElements {
return errBufferFull
}
originalBuffer := b.buffer
b.writeSeparator()
b.buffer = appendGauge(b.buffer, namespace, globalTags, name, value, tags, rate)
return b.validateNewElement(originalBuffer)
}
func (b *statsdBuffer) writeCount(namespace string, globalTags []string, name string, value int64, tags []string, rate float64) error {
if b.elementCount >= b.maxElements {
return errBufferFull
}
originalBuffer := b.buffer
b.writeSeparator()
b.buffer = appendCount(b.buffer, namespace, globalTags, name, value, tags, rate)
return b.validateNewElement(originalBuffer)
}
func (b *statsdBuffer) writeHistogram(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
if b.elementCount >= b.maxElements {
return errBufferFull
}
originalBuffer := b.buffer
b.writeSeparator()
b.buffer = appendHistogram(b.buffer, namespace, globalTags, name, value, tags, rate)
return b.validateNewElement(originalBuffer)
}
func (b *statsdBuffer) writeDistribution(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
if b.elementCount >= b.maxElements {
return errBufferFull
}
originalBuffer := b.buffer
b.writeSeparator()
b.buffer = appendDistribution(b.buffer, namespace, globalTags, name, value, tags, rate)
return b.validateNewElement(originalBuffer)
}
func (b *statsdBuffer) writeSet(namespace string, globalTags []string, name string, value string, tags []string, rate float64) error {
if b.elementCount >= b.maxElements {
return errBufferFull
}
originalBuffer := b.buffer
b.writeSeparator()
b.buffer = appendSet(b.buffer, namespace, globalTags, name, value, tags, rate)
return b.validateNewElement(originalBuffer)
}
func (b *statsdBuffer) writeTiming(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
if b.elementCount >= b.maxElements {
return errBufferFull
}
originalBuffer := b.buffer
b.writeSeparator()
b.buffer = appendTiming(b.buffer, namespace, globalTags, name, value, tags, rate)
return b.validateNewElement(originalBuffer)
}
func (b *statsdBuffer) writeEvent(event Event, globalTags []string) error {
if b.elementCount >= b.maxElements {
return errBufferFull
}
originalBuffer := b.buffer
b.writeSeparator()
b.buffer = appendEvent(b.buffer, event, globalTags)
return b.validateNewElement(originalBuffer)
}
func (b *statsdBuffer) writeServiceCheck(serviceCheck ServiceCheck, globalTags []string) error {
if b.elementCount >= b.maxElements {
return errBufferFull
}
originalBuffer := b.buffer
b.writeSeparator()
b.buffer = appendServiceCheck(b.buffer, serviceCheck, globalTags)
return b.validateNewElement(originalBuffer)
}
func (b *statsdBuffer) validateNewElement(originalBuffer []byte) error {
if len(b.buffer) > b.maxSize {
b.buffer = originalBuffer
return errBufferFull
}
b.elementCount++
return nil
}
func (b *statsdBuffer) writeSeparator() {
if b.elementCount != 0 {
b.buffer = appendSeparator(b.buffer)
}
}
func (b *statsdBuffer) reset() {
b.buffer = b.buffer[:0]
b.elementCount = 0
}
func (b *statsdBuffer) bytes() []byte {
return b.buffer
}
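A rough sketch of how these pieces compose (hypothetical in-package code, since statsdBuffer and errBufferFull are unexported; the function name and values are illustrative): write elements until the buffer reports it is full, then hand the payload off and reset.

package statsd

import "fmt"

// exampleBufferUsage fills a buffer until errBufferFull, then inspects
// the accumulated payload. With maxElements set to 3, the 4th write fails.
func exampleBufferUsage() {
	buf := newStatsdBuffer(1432, 3) // up to 1432 bytes and 3 elements
	for i := 0; i < 5; i++ {
		if err := buf.writeGauge("app.", nil, "load", 0.42, []string{"env:dev"}, 1); err == errBufferFull {
			fmt.Println("buffer full after", buf.elementCount, "elements; flush and reset")
			break
		}
	}
	fmt.Printf("payload: %q\n", buf.bytes())
	buf.reset()
}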


@@ -0,0 +1,40 @@
package statsd
type bufferPool struct {
pool chan *statsdBuffer
bufferMaxSize int
bufferMaxElements int
}
func newBufferPool(poolSize, bufferMaxSize, bufferMaxElements int) *bufferPool {
p := &bufferPool{
pool: make(chan *statsdBuffer, poolSize),
bufferMaxSize: bufferMaxSize,
bufferMaxElements: bufferMaxElements,
}
for i := 0; i < poolSize; i++ {
p.addNewBuffer()
}
return p
}
func (p *bufferPool) addNewBuffer() {
p.pool <- newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements)
}
func (p *bufferPool) borrowBuffer() *statsdBuffer {
select {
case b := <-p.pool:
return b
default:
return newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements)
}
}
func (p *bufferPool) returnBuffer(buffer *statsdBuffer) {
buffer.reset()
select {
case p.pool <- buffer:
default:
}
}
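A short sketch of the pool's intended lifecycle (hypothetical in-package code; the helper name is illustrative): borrow, write, return. When the channel is empty, borrowBuffer allocates a fresh buffer; when it is full, returnBuffer silently drops the buffer, so memory use stays bounded either way.

package statsd

// exampleBufferPoolUsage shows the borrow/return cycle around a payload.
func exampleBufferPoolUsage() {
	pool := newBufferPool(2, 1432, 64) // 2 pooled buffers, 1432 bytes / 64 elements each
	b := pool.borrowBuffer()
	_ = b.writeCount("", nil, "requests", 1, nil, 1)
	// ... the payload would be handed to the sender here ...
	pool.returnBuffer(b) // reset and parked for reuse, or dropped if the pool is full
}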

vendor/github.com/DataDog/datadog-go/statsd/event.go (91 changed lines; generated, vendored, new file)

@@ -0,0 +1,91 @@
package statsd
import (
"fmt"
"time"
)
// Events support
// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41
// The reason why they got exported is so that client code can directly use the types.
// EventAlertType is the alert type for events
type EventAlertType string
const (
// Info is the "info" AlertType for events
Info EventAlertType = "info"
// Error is the "error" AlertType for events
Error EventAlertType = "error"
// Warning is the "warning" AlertType for events
Warning EventAlertType = "warning"
// Success is the "success" AlertType for events
Success EventAlertType = "success"
)
// EventPriority is the event priority for events
type EventPriority string
const (
// Normal is the "normal" Priority for events
Normal EventPriority = "normal"
// Low is the "low" Priority for events
Low EventPriority = "low"
)
// An Event is an object that can be posted to your DataDog event stream.
type Event struct {
// Title of the event. Required.
Title string
// Text is the description of the event. Required.
Text string
// Timestamp is a timestamp for the event. If not provided, the dogstatsd
// server will set this to the current time.
Timestamp time.Time
// Hostname for the event.
Hostname string
// AggregationKey groups this event with others of the same key.
AggregationKey string
// Priority of the event. Can be statsd.Low or statsd.Normal.
Priority EventPriority
// SourceTypeName is a source type for the event.
SourceTypeName string
// AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
// If absent, the default value applied by the dogstatsd server is Info.
AlertType EventAlertType
// Tags for the event.
Tags []string
}
// NewEvent creates a new event with the given title and text. Error checking
// against these values is done at send-time, or upon running e.Check.
func NewEvent(title, text string) *Event {
return &Event{
Title: title,
Text: text,
}
}
// Check verifies that an event is valid.
func (e Event) Check() error {
if len(e.Title) == 0 {
return fmt.Errorf("statsd.Event title is required")
}
if len(e.Text) == 0 {
return fmt.Errorf("statsd.Event text is required")
}
return nil
}
// Encode returns the dogstatsd wire protocol representation for an event.
// Tags may be passed which will be added to the encoded output but not to
// the Event's list of tags, e.g. for default tags.
func (e Event) Encode(tags ...string) (string, error) {
err := e.Check()
if err != nil {
return "", err
}
var buffer []byte
buffer = appendEvent(buffer, e, tags)
return string(buffer), nil
}
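For instance (a hedged sketch; the titles, tags and values are illustrative), encoding an event yields the "_e{<title len>,<text len>}:" wire form produced by appendEvent in format.go below:

package statsd

import "fmt"

func exampleEventEncode() {
	e := NewEvent("title", "text")
	e.AlertType = Error
	wire, err := e.Encode("env:dev")
	if err != nil {
		panic(err)
	}
	fmt.Println(wire) // _e{5,4}:title|text|t:error|#env:dev
}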

vendor/github.com/DataDog/datadog-go/statsd/format.go (232 changed lines; generated, vendored, new file)

@@ -0,0 +1,232 @@
package statsd
import (
"strconv"
"strings"
)
var (
gaugeSymbol = []byte("g")
countSymbol = []byte("c")
histogramSymbol = []byte("h")
distributionSymbol = []byte("d")
setSymbol = []byte("s")
timingSymbol = []byte("ms")
)
func appendHeader(buffer []byte, namespace string, name string) []byte {
if namespace != "" {
buffer = append(buffer, namespace...)
}
buffer = append(buffer, name...)
buffer = append(buffer, ':')
return buffer
}
func appendRate(buffer []byte, rate float64) []byte {
if rate < 1 {
buffer = append(buffer, "|@"...)
buffer = strconv.AppendFloat(buffer, rate, 'f', -1, 64)
}
return buffer
}
func appendWithoutNewlines(buffer []byte, s string) []byte {
// fastpath for strings without newlines
if strings.IndexByte(s, '\n') == -1 {
return append(buffer, s...)
}
for _, b := range []byte(s) {
if b != '\n' {
buffer = append(buffer, b)
}
}
return buffer
}
func appendTags(buffer []byte, globalTags []string, tags []string) []byte {
if len(globalTags) == 0 && len(tags) == 0 {
return buffer
}
buffer = append(buffer, "|#"...)
firstTag := true
for _, tag := range globalTags {
if !firstTag {
buffer = append(buffer, ',')
}
buffer = appendWithoutNewlines(buffer, tag)
firstTag = false
}
for _, tag := range tags {
if !firstTag {
buffer = append(buffer, ',')
}
buffer = appendWithoutNewlines(buffer, tag)
firstTag = false
}
return buffer
}
func appendFloatMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64, precision int) []byte {
buffer = appendHeader(buffer, namespace, name)
buffer = strconv.AppendFloat(buffer, value, 'f', precision, 64)
buffer = append(buffer, '|')
buffer = append(buffer, typeSymbol...)
buffer = appendRate(buffer, rate)
buffer = appendTags(buffer, globalTags, tags)
return buffer
}
func appendIntegerMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte {
buffer = appendHeader(buffer, namespace, name)
buffer = strconv.AppendInt(buffer, value, 10)
buffer = append(buffer, '|')
buffer = append(buffer, typeSymbol...)
buffer = appendRate(buffer, rate)
buffer = appendTags(buffer, globalTags, tags)
return buffer
}
func appendStringMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte {
buffer = appendHeader(buffer, namespace, name)
buffer = append(buffer, value...)
buffer = append(buffer, '|')
buffer = append(buffer, typeSymbol...)
buffer = appendRate(buffer, rate)
buffer = appendTags(buffer, globalTags, tags)
return buffer
}
func appendGauge(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
return appendFloatMetric(buffer, gaugeSymbol, namespace, globalTags, name, value, tags, rate, -1)
}
func appendCount(buffer []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte {
return appendIntegerMetric(buffer, countSymbol, namespace, globalTags, name, value, tags, rate)
}
func appendHistogram(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
return appendFloatMetric(buffer, histogramSymbol, namespace, globalTags, name, value, tags, rate, -1)
}
func appendDistribution(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
return appendFloatMetric(buffer, distributionSymbol, namespace, globalTags, name, value, tags, rate, -1)
}
func appendSet(buffer []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte {
return appendStringMetric(buffer, setSymbol, namespace, globalTags, name, value, tags, rate)
}
func appendTiming(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
return appendFloatMetric(buffer, timingSymbol, namespace, globalTags, name, value, tags, rate, 6)
}
func escapedEventTextLen(text string) int {
return len(text) + strings.Count(text, "\n")
}
func appendEscapedEventText(buffer []byte, text string) []byte {
for _, b := range []byte(text) {
if b != '\n' {
buffer = append(buffer, b)
} else {
buffer = append(buffer, "\\n"...)
}
}
return buffer
}
func appendEvent(buffer []byte, event Event, globalTags []string) []byte {
escapedTextLen := escapedEventTextLen(event.Text)
buffer = append(buffer, "_e{"...)
buffer = strconv.AppendInt(buffer, int64(len(event.Title)), 10)
buffer = append(buffer, ',')
buffer = strconv.AppendInt(buffer, int64(escapedTextLen), 10)
buffer = append(buffer, "}:"...)
buffer = append(buffer, event.Title...)
buffer = append(buffer, '|')
if escapedTextLen != len(event.Text) {
buffer = appendEscapedEventText(buffer, event.Text)
} else {
buffer = append(buffer, event.Text...)
}
if !event.Timestamp.IsZero() {
buffer = append(buffer, "|d:"...)
buffer = strconv.AppendInt(buffer, int64(event.Timestamp.Unix()), 10)
}
if len(event.Hostname) != 0 {
buffer = append(buffer, "|h:"...)
buffer = append(buffer, event.Hostname...)
}
if len(event.AggregationKey) != 0 {
buffer = append(buffer, "|k:"...)
buffer = append(buffer, event.AggregationKey...)
}
if len(event.Priority) != 0 {
buffer = append(buffer, "|p:"...)
buffer = append(buffer, event.Priority...)
}
if len(event.SourceTypeName) != 0 {
buffer = append(buffer, "|s:"...)
buffer = append(buffer, event.SourceTypeName...)
}
if len(event.AlertType) != 0 {
buffer = append(buffer, "|t:"...)
buffer = append(buffer, string(event.AlertType)...)
}
buffer = appendTags(buffer, globalTags, event.Tags)
return buffer
}
func appendEscapedServiceCheckText(buffer []byte, text string) []byte {
for i := 0; i < len(text); i++ {
if text[i] == '\n' {
buffer = append(buffer, "\\n"...)
} else if text[i] == 'm' && i+1 < len(text) && text[i+1] == ':' {
buffer = append(buffer, "m\\:"...)
i++
} else {
buffer = append(buffer, text[i])
}
}
return buffer
}
func appendServiceCheck(buffer []byte, serviceCheck ServiceCheck, globalTags []string) []byte {
buffer = append(buffer, "_sc|"...)
buffer = append(buffer, serviceCheck.Name...)
buffer = append(buffer, '|')
buffer = strconv.AppendInt(buffer, int64(serviceCheck.Status), 10)
if !serviceCheck.Timestamp.IsZero() {
buffer = append(buffer, "|d:"...)
buffer = strconv.AppendInt(buffer, int64(serviceCheck.Timestamp.Unix()), 10)
}
if len(serviceCheck.Hostname) != 0 {
buffer = append(buffer, "|h:"...)
buffer = append(buffer, serviceCheck.Hostname...)
}
buffer = appendTags(buffer, globalTags, serviceCheck.Tags)
if len(serviceCheck.Message) != 0 {
buffer = append(buffer, "|m:"...)
buffer = appendEscapedServiceCheckText(buffer, serviceCheck.Message)
}
return buffer
}
func appendSeparator(buffer []byte) []byte {
return append(buffer, '\n')
}
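To make the wire format concrete, here is a small sketch (hypothetical in-package code, since the append helpers are unexported) of what a sampled, tagged gauge serializes to:

package statsd

import "fmt"

// exampleFormat builds one dogstatsd line: namespace+name, value, type
// symbol, sample rate (only when < 1), then global and per-metric tags.
func exampleFormat() {
	buf := appendGauge(nil, "app.", []string{"env:dev"}, "load", 0.75, []string{"az:a"}, 0.5)
	fmt.Println(string(buf)) // app.load:0.75|g|@0.5|#env:dev,az:a
}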

vendor/github.com/DataDog/datadog-go/statsd/noop.go (91 changed lines; generated, vendored, new file)

@@ -0,0 +1,91 @@
package statsd
import "time"
// NoOpClient is a statsd client that does nothing. Can be useful in testing
// situations for library users.
type NoOpClient struct{}
// Gauge does nothing and returns nil
func (n *NoOpClient) Gauge(name string, value float64, tags []string, rate float64) error {
return nil
}
// Count does nothing and returns nil
func (n *NoOpClient) Count(name string, value int64, tags []string, rate float64) error {
return nil
}
// Histogram does nothing and returns nil
func (n *NoOpClient) Histogram(name string, value float64, tags []string, rate float64) error {
return nil
}
// Distribution does nothing and returns nil
func (n *NoOpClient) Distribution(name string, value float64, tags []string, rate float64) error {
return nil
}
// Decr does nothing and returns nil
func (n *NoOpClient) Decr(name string, tags []string, rate float64) error {
return nil
}
// Incr does nothing and returns nil
func (n *NoOpClient) Incr(name string, tags []string, rate float64) error {
return nil
}
// Set does nothing and returns nil
func (n *NoOpClient) Set(name string, value string, tags []string, rate float64) error {
return nil
}
// Timing does nothing and returns nil
func (n *NoOpClient) Timing(name string, value time.Duration, tags []string, rate float64) error {
return nil
}
// TimeInMilliseconds does nothing and returns nil
func (n *NoOpClient) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
return nil
}
// Event does nothing and returns nil
func (n *NoOpClient) Event(e *Event) error {
return nil
}
// SimpleEvent does nothing and returns nil
func (n *NoOpClient) SimpleEvent(title, text string) error {
return nil
}
// ServiceCheck does nothing and returns nil
func (n *NoOpClient) ServiceCheck(sc *ServiceCheck) error {
return nil
}
// SimpleServiceCheck does nothing and returns nil
func (n *NoOpClient) SimpleServiceCheck(name string, status ServiceCheckStatus) error {
return nil
}
// Close does nothing and returns nil
func (n *NoOpClient) Close() error {
return nil
}
// Flush does nothing and returns nil
func (n *NoOpClient) Flush() error {
return nil
}
// SetWriteTimeout does nothing and returns nil
func (n *NoOpClient) SetWriteTimeout(d time.Duration) error {
return nil
}
// Verify that NoOpClient implements the ClientInterface.
// https://golang.org/doc/faq#guarantee_satisfies_interface
var _ ClientInterface = &NoOpClient{}
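A typical use, sketched under the assumption that callers program against ClientInterface (declared in statsd.go further down; the helper name and address are illustrative): return the no-op client when metrics are disabled and the real client otherwise, without changing any call site.

package statsd

// newClientForTests is a hypothetical helper: callers that depend on
// ClientInterface work unchanged whether metrics are enabled or not.
func newClientForTests(enabled bool) (ClientInterface, error) {
	if !enabled {
		return &NoOpClient{}, nil
	}
	return New("127.0.0.1:8125") // *Client also satisfies ClientInterface
}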


@@ -1,18 +1,25 @@
package statsd
import "time"
import (
"math"
"time"
)
var (
// DefaultNamespace is the default value for the Namespace option
DefaultNamespace = ""
// DefaultTags is the default value for the Tags option
DefaultTags = []string{}
// DefaultBuffered is the default value for the Buffered option
DefaultBuffered = false
// DefaultMaxBytesPerPayload is the default value for the MaxBytesPerPayload option
DefaultMaxBytesPerPayload = 0
// DefaultMaxMessagesPerPayload is the default value for the MaxMessagesPerPayload option
DefaultMaxMessagesPerPayload = 16
// DefaultAsyncUDS is the default value for the AsyncUDS option
DefaultAsyncUDS = false
DefaultMaxMessagesPerPayload = math.MaxInt32
// DefaultBufferPoolSize is the default value for the DefaultBufferPoolSize option
DefaultBufferPoolSize = 0
// DefaultBufferFlushInterval is the default value for the BufferFlushInterval option
DefaultBufferFlushInterval = 100 * time.Millisecond
// DefaultSenderQueueSize is the default value for the DefaultSenderQueueSize option
DefaultSenderQueueSize = 0
// DefaultWriteTimeoutUDS is the default value for the WriteTimeoutUDS option
DefaultWriteTimeoutUDS = 1 * time.Millisecond
)
@@ -23,16 +30,23 @@ type Options struct {
Namespace string
// Tags are global tags to be applied to every metric, event and service check.
Tags []string
// Buffered allows packing multiple DogStatsD messages into one payload. Messages are buffered
// until the payload holds MaxMessagesPerPayload metrics, events and/or service
// checks, or until 100ms after the payload started to be built.
Buffered bool
// MaxBytesPerPayload is the maximum number of bytes a single payload will contain.
// The magic value 0 will set the option to the optimal size for the transport
// protocol used when creating the client: 1432 for UDP and 8192 for UDS.
MaxBytesPerPayload int
// MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain.
// Note that this option only takes effect when the client is buffered.
// This option can be set to `1` to create an unbuffered client.
MaxMessagesPerPayload int
// AsyncUDS allows switching between async and blocking mode for UDS.
// Blocking mode allows for error checking but does not guarantee that calls won't block execution.
AsyncUDS bool
// BufferPoolSize is the size of the pool of buffers in number of buffers.
// The magic value 0 will set the option to the optimal size for the transport
// protocol used when creating the client: 2048 for UDP and 512 for UDS.
BufferPoolSize int
// BufferFlushInterval is the interval after which the current buffer will get flushed.
BufferFlushInterval time.Duration
// SenderQueueSize is the size of the sender queue in number of buffers.
// The magic value 0 will set the option to the optimal size for the transport
// protocol used when creating the client: 2048 for UDP and 512 for UDS.
SenderQueueSize int
// WriteTimeoutUDS is the timeout after which a UDS packet is dropped.
WriteTimeoutUDS time.Duration
}
@@ -41,9 +55,11 @@ func resolveOptions(options []Option) (*Options, error) {
o := &Options{
Namespace: DefaultNamespace,
Tags: DefaultTags,
Buffered: DefaultBuffered,
MaxBytesPerPayload: DefaultMaxBytesPerPayload,
MaxMessagesPerPayload: DefaultMaxMessagesPerPayload,
AsyncUDS: DefaultAsyncUDS,
BufferPoolSize: DefaultBufferPoolSize,
BufferFlushInterval: DefaultBufferFlushInterval,
SenderQueueSize: DefaultSenderQueueSize,
WriteTimeoutUDS: DefaultWriteTimeoutUDS,
}
@@ -76,14 +92,6 @@ func WithTags(tags []string) Option {
}
}
// Buffered sets the Buffered option.
func Buffered() Option {
return func(o *Options) error {
o.Buffered = true
return nil
}
}
// WithMaxMessagesPerPayload sets the MaxMessagesPerPayload option.
func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option {
return func(o *Options) error {
@@ -92,10 +100,34 @@ func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option {
}
}
// WithAsyncUDS sets the AsyncUDS option.
func WithAsyncUDS() Option {
// WithMaxBytesPerPayload sets the MaxBytesPerPayload option.
func WithMaxBytesPerPayload(MaxBytesPerPayload int) Option {
return func(o *Options) error {
o.AsyncUDS = true
o.MaxBytesPerPayload = MaxBytesPerPayload
return nil
}
}
// WithBufferPoolSize sets the BufferPoolSize option.
func WithBufferPoolSize(bufferPoolSize int) Option {
return func(o *Options) error {
o.BufferPoolSize = bufferPoolSize
return nil
}
}
// WithBufferFlushInterval sets the BufferFlushInterval option.
func WithBufferFlushInterval(bufferFlushInterval time.Duration) Option {
return func(o *Options) error {
o.BufferFlushInterval = bufferFlushInterval
return nil
}
}
// WithSenderQueueSize sets the SenderQueueSize option.
func WithSenderQueueSize(senderQueueSize int) Option {
return func(o *Options) error {
o.SenderQueueSize = senderQueueSize
return nil
}
}
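Put together, a client tuned with the new option set (which replaces Buffered() and WithAsyncUDS()) might be constructed like this. A sketch only: the address and values are illustrative, and only options shown above are used.

package statsd

import "time"

func newTunedClient() (*Client, error) {
	return New("127.0.0.1:8125",
		WithTags([]string{"service:consul"}),
		WithMaxBytesPerPayload(8192), // e.g. for a network with a large MTU
		WithBufferFlushInterval(50*time.Millisecond),
		WithSenderQueueSize(1024),
	)
}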

vendor/github.com/DataDog/datadog-go/statsd/sender.go (116 changed lines; generated, vendored, new file)

@@ -0,0 +1,116 @@
package statsd
import (
"sync/atomic"
"time"
)
// A statsdWriter offers a standard interface regardless of the underlying
// protocol. For now, UDS and UDP writers are available.
// Attention: the underlying buffer of `data` is reused after a `statsdWriter.Write` call.
// `statsdWriter.Write` must be synchronous.
type statsdWriter interface {
Write(data []byte) (n int, err error)
SetWriteTimeout(time.Duration) error
Close() error
}
// SenderMetrics contains metrics about the health of the sender
type SenderMetrics struct {
TotalSentBytes uint64
TotalSentPayloads uint64
TotalDroppedPayloads uint64
TotalDroppedBytes uint64
TotalDroppedPayloadsQueueFull uint64
TotalDroppedBytesQueueFull uint64
TotalDroppedPayloadsWriter uint64
TotalDroppedBytesWriter uint64
}
type sender struct {
transport statsdWriter
pool *bufferPool
queue chan *statsdBuffer
metrics SenderMetrics
stop chan struct{}
}
func newSender(transport statsdWriter, queueSize int, pool *bufferPool) *sender {
sender := &sender{
transport: transport,
pool: pool,
queue: make(chan *statsdBuffer, queueSize),
stop: make(chan struct{}),
}
go sender.sendLoop()
return sender
}
func (s *sender) send(buffer *statsdBuffer) {
select {
case s.queue <- buffer:
default:
atomic.AddUint64(&s.metrics.TotalDroppedPayloads, 1)
atomic.AddUint64(&s.metrics.TotalDroppedBytes, uint64(len(buffer.bytes())))
atomic.AddUint64(&s.metrics.TotalDroppedPayloadsQueueFull, 1)
atomic.AddUint64(&s.metrics.TotalDroppedBytesQueueFull, uint64(len(buffer.bytes())))
s.pool.returnBuffer(buffer)
}
}
func (s *sender) write(buffer *statsdBuffer) {
_, err := s.transport.Write(buffer.bytes())
if err != nil {
atomic.AddUint64(&s.metrics.TotalDroppedPayloads, 1)
atomic.AddUint64(&s.metrics.TotalDroppedBytes, uint64(len(buffer.bytes())))
atomic.AddUint64(&s.metrics.TotalDroppedPayloadsWriter, 1)
atomic.AddUint64(&s.metrics.TotalDroppedBytesWriter, uint64(len(buffer.bytes())))
} else {
atomic.AddUint64(&s.metrics.TotalSentPayloads, 1)
atomic.AddUint64(&s.metrics.TotalSentBytes, uint64(len(buffer.bytes())))
}
s.pool.returnBuffer(buffer)
}
func (s *sender) flushMetrics() SenderMetrics {
return SenderMetrics{
TotalSentBytes: atomic.SwapUint64(&s.metrics.TotalSentBytes, 0),
TotalSentPayloads: atomic.SwapUint64(&s.metrics.TotalSentPayloads, 0),
TotalDroppedPayloads: atomic.SwapUint64(&s.metrics.TotalDroppedPayloads, 0),
TotalDroppedBytes: atomic.SwapUint64(&s.metrics.TotalDroppedBytes, 0),
TotalDroppedPayloadsQueueFull: atomic.SwapUint64(&s.metrics.TotalDroppedPayloadsQueueFull, 0),
TotalDroppedBytesQueueFull: atomic.SwapUint64(&s.metrics.TotalDroppedBytesQueueFull, 0),
TotalDroppedPayloadsWriter: atomic.SwapUint64(&s.metrics.TotalDroppedPayloadsWriter, 0),
TotalDroppedBytesWriter: atomic.SwapUint64(&s.metrics.TotalDroppedBytesWriter, 0),
}
}
func (s *sender) sendLoop() {
for {
select {
case buffer := <-s.queue:
s.write(buffer)
case <-s.stop:
return
}
}
}
func (s *sender) flush() {
for {
select {
case buffer := <-s.queue:
s.write(buffer)
default:
return
}
}
}
func (s *sender) close() error {
s.flush()
err := s.transport.Close()
close(s.stop)
return err
}
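A hedged in-package sketch of the sender's lifecycle (the function name is illustrative): buffers are queued, or dropped and counted when the queue is full, written by the background loop, and the health counters can be snapshotted with flushMetrics, which atomically swaps them back to zero.

package statsd

import "fmt"

func exampleSender(w statsdWriter) {
	pool := newBufferPool(8, 1432, 64)
	s := newSender(w, 8, pool) // starts the background sendLoop goroutine
	b := pool.borrowBuffer()
	_ = b.writeCount("", nil, "requests", 1, nil, 1)
	s.send(b)             // enqueue; drops and counts if the queue is full
	s.flush()             // synchronously drain whatever is still queued
	m := s.flushMetrics() // snapshot-and-reset of the counters
	fmt.Println(m.TotalSentPayloads, m.TotalDroppedPayloads)
	_ = s.close()
}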


@@ -0,0 +1,70 @@
package statsd
import (
"fmt"
"time"
)
// ServiceCheckStatus support
type ServiceCheckStatus byte
const (
// Ok is the "ok" ServiceCheck status
Ok ServiceCheckStatus = 0
// Warn is the "warning" ServiceCheck status
Warn ServiceCheckStatus = 1
// Critical is the "critical" ServiceCheck status
Critical ServiceCheckStatus = 2
// Unknown is the "unknown" ServiceCheck status
Unknown ServiceCheckStatus = 3
)
// A ServiceCheck is an object that contains the status of a DataDog service check.
type ServiceCheck struct {
// Name of the service check. Required.
Name string
// Status of service check. Required.
Status ServiceCheckStatus
// Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd
// server will set this to the current time.
Timestamp time.Time
// Hostname for the serviceCheck.
Hostname string
// A message describing the current state of the serviceCheck.
Message string
// Tags for the serviceCheck.
Tags []string
}
// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking
// against these values is done at send-time, or upon running sc.Check.
func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck {
return &ServiceCheck{
Name: name,
Status: status,
}
}
// Check verifies that a service check is valid.
func (sc ServiceCheck) Check() error {
if len(sc.Name) == 0 {
return fmt.Errorf("statsd.ServiceCheck name is required")
}
if byte(sc.Status) < 0 || byte(sc.Status) > 3 {
return fmt.Errorf("statsd.ServiceCheck status has invalid value")
}
return nil
}
// Encode returns the dogstatsd wire protocol representation for a service check.
// Tags may be passed which will be added to the encoded output but not to
// the Service Check's list of tags, e.g. for default tags.
func (sc ServiceCheck) Encode(tags ...string) (string, error) {
err := sc.Check()
if err != nil {
return "", err
}
var buffer []byte
buffer = appendServiceCheck(buffer, sc, tags)
return string(buffer), nil
}
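As with events, encoding produces the "_sc|" wire form handled by appendServiceCheck in format.go; a small illustrative sketch (names and message are made up):

package statsd

import "fmt"

func exampleServiceCheckEncode() {
	sc := NewServiceCheck("db.up", Critical)
	sc.Message = "connection refused"
	wire, err := sc.Encode("env:dev")
	if err != nil {
		panic(err)
	}
	fmt.Println(wire) // _sc|db.up|2|#env:dev|m:connection refused
}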


@@ -24,28 +24,21 @@ statsd is based on go-statsd-client.
package statsd
import (
"bytes"
"errors"
"fmt"
"io"
"math/rand"
"os"
"strconv"
"strings"
"sync"
"time"
)
/*
OptimalPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes
OptimalUDPPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes
is optimal for regular networks with an MTU of 1500 so datagrams don't get
fragmented. It's generally recommended not to fragment UDP datagrams as losing
a single fragment will cause the entire datagram to be lost.
This can be increased if your network has a greater MTU or you don't mind UDP
datagrams getting fragmented. The practical limit is MaxUDPPayloadSize
*/
const OptimalPayloadSize = 1432
const OptimalUDPPayloadSize = 1432
/*
MaxUDPPayloadSize defines the maximum payload size for a UDP datagram.
@@ -55,6 +48,29 @@ any number greater than that will see frames being cut out.
*/
const MaxUDPPayloadSize = 65467
// DefaultUDPBufferPoolSize is the default size of the buffer pool for UDP clients.
const DefaultUDPBufferPoolSize = 2048
// DefaultUDSBufferPoolSize is the default size of the buffer pool for UDS clients.
const DefaultUDSBufferPoolSize = 512
/*
DefaultMaxAgentPayloadSize is the default maximum payload size the agent
can receive. This can be adjusted by changing dogstatsd_buffer_size in the
agent configuration file datadog.yaml.
*/
const DefaultMaxAgentPayloadSize = 8192
/*
TelemetryInterval is the interval at which telemetry will be sent by the client.
*/
const TelemetryInterval = 10 * time.Second
/*
clientTelemetryTag is a tag identifying this specific client.
*/
var clientTelemetryTag = "client:go"
/*
UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket
traffic instead of UDP.
@@ -67,74 +83,177 @@ const (
entityIDTagName = "dd.internal.entity_id"
)
/*
Stat suffixes
*/
var (
gaugeSuffix = []byte("|g")
countSuffix = []byte("|c")
histogramSuffix = []byte("|h")
distributionSuffix = []byte("|d")
decrSuffix = []byte("-1|c")
incrSuffix = []byte("1|c")
setSuffix = []byte("|s")
timingSuffix = []byte("|ms")
type metricType int
const (
gauge metricType = iota
count
histogram
distribution
set
timing
event
serviceCheck
)
// A statsdWriter offers a standard interface regardless of the underlying
// protocol. For now, UDS and UDP writers are available.
type statsdWriter interface {
Write(data []byte) (n int, err error)
SetWriteTimeout(time.Duration) error
type metric struct {
metricType metricType
namespace string
globalTags []string
name string
fvalue float64
ivalue int64
svalue string
evalue *Event
scvalue *ServiceCheck
tags []string
rate float64
}
type noClientErr string
// ErrNoClient is returned if statsd reporting methods are invoked on
// a nil client.
const ErrNoClient = noClientErr("statsd client is nil")
func (e noClientErr) Error() string {
return string(e)
}
// ClientInterface is an interface that exposes the common client functions for the
// purpose of being able to provide a no-op client or even mocking. This can aid
// downstream users with their testing.
type ClientInterface interface {
// Gauge measures the value of a metric at a particular time.
Gauge(name string, value float64, tags []string, rate float64) error
// Count tracks how many times something happened per second.
Count(name string, value int64, tags []string, rate float64) error
// Histogram tracks the statistical distribution of a set of values on each host.
Histogram(name string, value float64, tags []string, rate float64) error
// Distribution tracks the statistical distribution of a set of values across your infrastructure.
Distribution(name string, value float64, tags []string, rate float64) error
// Decr is just Count of -1
Decr(name string, tags []string, rate float64) error
// Incr is just Count of 1
Incr(name string, tags []string, rate float64) error
// Set counts the number of unique elements in a group.
Set(name string, value string, tags []string, rate float64) error
// Timing sends timing information, it is an alias for TimeInMilliseconds
Timing(name string, value time.Duration, tags []string, rate float64) error
// TimeInMilliseconds sends timing information in milliseconds.
// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
TimeInMilliseconds(name string, value float64, tags []string, rate float64) error
// Event sends the provided Event.
Event(e *Event) error
// SimpleEvent sends an event with the provided title and text.
SimpleEvent(title, text string) error
// ServiceCheck sends the provided ServiceCheck.
ServiceCheck(sc *ServiceCheck) error
// SimpleServiceCheck sends a ServiceCheck with the provided name and status.
SimpleServiceCheck(name string, status ServiceCheckStatus) error
// Close the client connection.
Close() error
// Flush forces a flush of all the queued dogstatsd payloads.
Flush() error
// SetWriteTimeout allows the user to set a custom write timeout.
SetWriteTimeout(d time.Duration) error
}
// A Client is a handle for sending messages to dogstatsd. It is safe to
// use one Client from multiple goroutines simultaneously.
type Client struct {
// Writer handles the underlying networking protocol
writer statsdWriter
// Sender handles the underlying networking protocol
sender *sender
// Namespace to prepend to all statsd calls
Namespace string
// Tags are global tags to be added to every statsd call
Tags []string
// skipErrors turns off error passing and allows UDS to emulate UDP behaviour
SkipErrors bool
// BufferLength is the length of the buffer in commands.
bufferLength int
flushTime time.Duration
commands [][]byte
buffer bytes.Buffer
stop chan struct{}
SkipErrors bool
flushTime time.Duration
bufferPool *bufferPool
buffer *statsdBuffer
telemetryTags []string
stop chan struct{}
sync.Mutex
}
// Verify that Client implements the ClientInterface.
// https://golang.org/doc/faq#guarantee_satisfies_interface
var _ ClientInterface = &Client{}
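End to end, the new buffered client is used the same way as before; a short sketch from a consumer's point of view (the address, metric names and values are illustrative):

package statsd_test

import (
	"time"

	"github.com/DataDog/datadog-go/statsd"
)

func exampleClient() error {
	c, err := statsd.New("127.0.0.1:8125")
	if err != nil {
		return err
	}
	defer c.Close()
	c.Gauge("kafka.lag", 12, []string{"topic:events"}, 1)
	c.Count("http.requests", 1, nil, 0.5) // sampled: sent ~50% of the time
	c.Timing("http.latency", 87*time.Millisecond, nil, 1)
	return c.Flush()
}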
// New returns a pointer to a new Client given an addr in the format "hostname:port" or
// "unix:///path/to/socket".
func New(addr string, options ...Option) (*Client, error) {
var w statsdWriter
o, err := resolveOptions(options)
if err != nil {
return nil, err
}
var w statsdWriter
var writerType string
optimalPayloadSize := OptimalUDPPayloadSize
defaultBufferPoolSize := DefaultUDPBufferPoolSize
if !strings.HasPrefix(addr, UnixAddressPrefix) {
w, err = newUDPWriter(addr)
} else if o.AsyncUDS {
w, err = newAsyncUdsWriter(addr[len(UnixAddressPrefix)-1:])
writerType = "udp"
} else {
w, err = newBlockingUdsWriter(addr[len(UnixAddressPrefix)-1:])
// FIXME: The agent has a performance pitfall preventing us from using better defaults here.
// Once it's fixed, use `DefaultMaxAgentPayloadSize` and `DefaultUDSBufferPoolSize` instead.
optimalPayloadSize = OptimalUDPPayloadSize
defaultBufferPoolSize = DefaultUDPBufferPoolSize
w, err = newUDSWriter(addr[len(UnixAddressPrefix)-1:])
writerType = "uds"
}
if err != nil {
return nil, err
}
if o.MaxBytesPerPayload == 0 {
o.MaxBytesPerPayload = optimalPayloadSize
}
if o.BufferPoolSize == 0 {
o.BufferPoolSize = defaultBufferPoolSize
}
if o.SenderQueueSize == 0 {
o.SenderQueueSize = defaultBufferPoolSize
}
return newWithWriter(w, o, writerType)
}
// NewWithWriter creates a new Client with the given writer. Writer is an
// io.WriteCloser + SetWriteTimeout(time.Duration) error
func NewWithWriter(w statsdWriter, options ...Option) (*Client, error) {
o, err := resolveOptions(options)
if err != nil {
return nil, err
}
return newWithWriter(w, o, "custom")
}
func newWithWriter(w statsdWriter, o *Options, writerName string) (*Client, error) {
w.SetWriteTimeout(o.WriteTimeoutUDS)
c := Client{
Namespace: o.Namespace,
Tags: o.Tags,
writer: w,
Namespace: o.Namespace,
Tags: o.Tags,
telemetryTags: []string{clientTelemetryTag, "transport:" + writerName},
}
// Inject DD_ENTITY_ID as a constant tag if found
@@ -144,85 +263,36 @@ func New(addr string, options ...Option) (*Client, error) {
c.Tags = append(c.Tags, entityTag)
}
if o.Buffered {
c.bufferLength = o.MaxMessagesPerPayload
c.commands = make([][]byte, 0, o.MaxMessagesPerPayload)
c.flushTime = time.Millisecond * 100
c.stop = make(chan struct{}, 1)
go c.watch()
if o.MaxBytesPerPayload == 0 {
o.MaxBytesPerPayload = OptimalUDPPayloadSize
}
c.bufferPool = newBufferPool(o.BufferPoolSize, o.MaxBytesPerPayload, o.MaxMessagesPerPayload)
c.buffer = c.bufferPool.borrowBuffer()
c.sender = newSender(w, o.SenderQueueSize, c.bufferPool)
c.flushTime = o.BufferFlushInterval
c.stop = make(chan struct{}, 1)
go c.watch()
go c.telemetry()
return &c, nil
}
// NewWithWriter creates a new Client with the given writer. Writer is an
// io.WriteCloser + SetWriteTimeout(time.Duration) error
func NewWithWriter(w statsdWriter) (*Client, error) {
client := &Client{writer: w, SkipErrors: false}
// Inject DD_ENTITY_ID as a constant tag if found
entityID := os.Getenv(entityIDEnvName)
if entityID != "" {
entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID)
client.Tags = append(client.Tags, entityTag)
}
return client, nil
}
// NewBuffered returns a Client that buffers its output and sends it in chunks.
// Buflen is the length of the buffer in number of commands.
//
// When addr is empty, the client will default to a UDP client and use the DD_AGENT_HOST
// and (optionally) the DD_DOGSTATSD_PORT environment variables to build the target address.
func NewBuffered(addr string, buflen int) (*Client, error) {
return New(addr, Buffered(), WithMaxMessagesPerPayload(buflen))
}
// format a message from its name, value, tags and rate. Also adds global
// namespace and tags.
func (c *Client) format(name string, value interface{}, suffix []byte, tags []string, rate float64) []byte {
// preallocated buffer, stack allocated as long as it doesn't escape
buf := make([]byte, 0, 200)
if c.Namespace != "" {
buf = append(buf, c.Namespace...)
}
buf = append(buf, name...)
buf = append(buf, ':')
switch val := value.(type) {
case float64:
buf = strconv.AppendFloat(buf, val, 'f', 6, 64)
case int64:
buf = strconv.AppendInt(buf, val, 10)
case string:
buf = append(buf, val...)
default:
// do nothing
}
buf = append(buf, suffix...)
if rate < 1 {
buf = append(buf, "|@"...)
buf = strconv.AppendFloat(buf, rate, 'f', -1, 64)
}
buf = appendTagString(buf, c.Tags, tags)
// non-zeroing copy to avoid referencing a larger than necessary underlying array
return append([]byte(nil), buf...)
return New(addr, WithMaxMessagesPerPayload(buflen))
}
// SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP.
func (c *Client) SetWriteTimeout(d time.Duration) error {
if c == nil {
return fmt.Errorf("Client is nil")
return ErrNoClient
}
return c.writer.SetWriteTimeout(d)
return c.sender.transport.SetWriteTimeout(d)
}
func (c *Client) watch() {
@@ -232,10 +302,7 @@ func (c *Client) watch() {
select {
case <-ticker.C:
c.Lock()
if len(c.commands) > 0 {
// FIXME: eating error here
c.flushLocked()
}
c.flushUnsafe()
c.Unlock()
case <-c.stop:
ticker.Stop()
@@ -244,165 +311,149 @@ }
}
}
func (c *Client) append(cmd []byte) error {
func (c *Client) telemetry() {
ticker := time.NewTicker(TelemetryInterval)
for {
select {
case <-ticker.C:
metrics := c.sender.flushMetrics()
c.telemetryCount("datadog.dogstatsd.client.packets_sent", int64(metrics.TotalSentPayloads), c.telemetryTags, 1)
c.telemetryCount("datadog.dogstatsd.client.bytes_sent", int64(metrics.TotalSentBytes), c.telemetryTags, 1)
c.telemetryCount("datadog.dogstatsd.client.packets_dropped", int64(metrics.TotalDroppedPayloads), c.telemetryTags, 1)
c.telemetryCount("datadog.dogstatsd.client.bytes_dropped", int64(metrics.TotalDroppedBytes), c.telemetryTags, 1)
c.telemetryCount("datadog.dogstatsd.client.packets_dropped_queue", int64(metrics.TotalDroppedPayloadsQueueFull), c.telemetryTags, 1)
c.telemetryCount("datadog.dogstatsd.client.bytes_dropped_queue", int64(metrics.TotalDroppedBytesQueueFull), c.telemetryTags, 1)
c.telemetryCount("datadog.dogstatsd.client.packets_dropped_writer", int64(metrics.TotalDroppedPayloadsWriter), c.telemetryTags, 1)
c.telemetryCount("datadog.dogstatsd.client.bytes_dropped_writer", int64(metrics.TotalDroppedBytesWriter), c.telemetryTags, 1)
case <-c.stop:
ticker.Stop()
return
}
}
}
// same as Count but without global namespace / tags
func (c *Client) telemetryCount(name string, value int64, tags []string, rate float64) {
c.addMetric(metric{metricType: count, name: name, ivalue: value, tags: tags, rate: rate})
}
// Flush forces a flush of all the queued dogstatsd payloads
// This method is blocking and will not return until everything is sent
// through the network
func (c *Client) Flush() error {
if c == nil {
return ErrNoClient
}
c.Lock()
defer c.Unlock()
c.commands = append(c.commands, cmd)
// if we should flush, lets do it
if len(c.commands) == c.bufferLength {
if err := c.flushLocked(); err != nil {
return err
}
c.flushUnsafe()
c.sender.flush()
return nil
}
// flush the current buffer. Lock must be held by caller.
// The flushed buffer is written to the network asynchronously.
func (c *Client) flushUnsafe() {
if len(c.buffer.bytes()) > 0 {
c.sender.send(c.buffer)
c.buffer = c.bufferPool.borrowBuffer()
}
}
func (c *Client) shouldSample(rate float64) bool {
if rate < 1 && rand.Float64() > rate {
return true
}
return false
}
func (c *Client) globalTags() []string {
if c != nil {
return c.Tags
}
return nil
}
func (c *Client) joinMaxSize(cmds [][]byte, sep string, maxSize int) ([][]byte, []int) {
c.buffer.Reset() //clear buffer
var frames [][]byte
var ncmds []int
sepBytes := []byte(sep)
sepLen := len(sep)
elem := 0
for _, cmd := range cmds {
needed := len(cmd)
if elem != 0 {
needed = needed + sepLen
}
if c.buffer.Len()+needed <= maxSize {
if elem != 0 {
c.buffer.Write(sepBytes)
}
c.buffer.Write(cmd)
elem++
} else {
frames = append(frames, copyAndResetBuffer(&c.buffer))
ncmds = append(ncmds, elem)
// if cmd is bigger than maxSize it will get flushed on next loop
c.buffer.Write(cmd)
elem = 1
}
func (c *Client) namespace() string {
if c != nil {
return c.Namespace
}
//add whatever is left! if there's actually something
if c.buffer.Len() > 0 {
frames = append(frames, copyAndResetBuffer(&c.buffer))
ncmds = append(ncmds, elem)
}
return frames, ncmds
return ""
}
func copyAndResetBuffer(buf *bytes.Buffer) []byte {
tmpBuf := make([]byte, buf.Len())
copy(tmpBuf, buf.Bytes())
buf.Reset()
return tmpBuf
func (c *Client) writeMetricUnsafe(m metric) error {
switch m.metricType {
case gauge:
return c.buffer.writeGauge(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
case count:
return c.buffer.writeCount(m.namespace, m.globalTags, m.name, m.ivalue, m.tags, m.rate)
case histogram:
return c.buffer.writeHistogram(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
case distribution:
return c.buffer.writeDistribution(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
case set:
return c.buffer.writeSet(m.namespace, m.globalTags, m.name, m.svalue, m.tags, m.rate)
case timing:
return c.buffer.writeTiming(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
case event:
return c.buffer.writeEvent(*m.evalue, m.globalTags)
case serviceCheck:
return c.buffer.writeServiceCheck(*m.scvalue, m.globalTags)
default:
return nil
}
}
// Flush forces a flush of the pending commands in the buffer
func (c *Client) Flush() error {
func (c *Client) addMetric(m metric) error {
if c == nil {
return fmt.Errorf("Client is nil")
return ErrNoClient
}
if c.shouldSample(m.rate) {
return nil
}
c.Lock()
defer c.Unlock()
return c.flushLocked()
}
// flush the commands in the buffer. Lock must be held by caller.
func (c *Client) flushLocked() error {
frames, flushable := c.joinMaxSize(c.commands, "\n", OptimalPayloadSize)
var err error
cmdsFlushed := 0
for i, data := range frames {
_, e := c.writer.Write(data)
if e != nil {
err = e
break
}
cmdsFlushed += flushable[i]
}
// clear the slice with a slice op, doesn't realloc
if cmdsFlushed == len(c.commands) {
c.commands = c.commands[:0]
} else {
//this case will cause a future realloc...
// drop problematic command though (sorry).
c.commands = c.commands[cmdsFlushed+1:]
if err = c.writeMetricUnsafe(m); err == errBufferFull {
c.flushUnsafe()
err = c.writeMetricUnsafe(m)
}
c.Unlock()
return err
}
func (c *Client) sendMsg(msg []byte) error {
// return an error if message is bigger than MaxUDPPayloadSize
if len(msg) > MaxUDPPayloadSize {
return errors.New("message size exceeds MaxUDPPayloadSize")
}
// if this client is buffered, then we'll just append this
if c.bufferLength > 0 {
return c.append(msg)
}
_, err := c.writer.Write(msg)
if c.SkipErrors {
return nil
}
return err
}
// send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags.
func (c *Client) send(name string, value interface{}, suffix []byte, tags []string, rate float64) error {
if c == nil {
return fmt.Errorf("Client is nil")
}
if rate < 1 && rand.Float64() > rate {
return nil
}
data := c.format(name, value, suffix, tags, rate)
return c.sendMsg(data)
}
// Gauge measures the value of a metric at a particular time.
func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error {
return c.send(name, value, gaugeSuffix, tags, rate)
return c.addMetric(metric{namespace: c.namespace(), globalTags: c.globalTags(), metricType: gauge, name: name, fvalue: value, tags: tags, rate: rate})
}
// Count tracks how many times something happened per second.
func (c *Client) Count(name string, value int64, tags []string, rate float64) error {
return c.send(name, value, countSuffix, tags, rate)
return c.addMetric(metric{namespace: c.namespace(), globalTags: c.globalTags(), metricType: count, name: name, ivalue: value, tags: tags, rate: rate})
}
// Histogram tracks the statistical distribution of a set of values on each host.
func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error {
return c.send(name, value, histogramSuffix, tags, rate)
return c.addMetric(metric{namespace: c.namespace(), globalTags: c.globalTags(), metricType: histogram, name: name, fvalue: value, tags: tags, rate: rate})
}
// Distribution tracks the statistical distribution of a set of values across your infrastructure.
func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error {
return c.send(name, value, distributionSuffix, tags, rate)
return c.addMetric(metric{namespace: c.namespace(), globalTags: c.globalTags(), metricType: distribution, name: name, fvalue: value, tags: tags, rate: rate})
}
// Decr is just Count of -1
func (c *Client) Decr(name string, tags []string, rate float64) error {
return c.send(name, nil, decrSuffix, tags, rate)
return c.Count(name, -1, tags, rate)
}
// Incr is just Count of 1
func (c *Client) Incr(name string, tags []string, rate float64) error {
return c.send(name, nil, incrSuffix, tags, rate)
return c.Count(name, 1, tags, rate)
}
// Set counts the number of unique elements in a group.
func (c *Client) Set(name string, value string, tags []string, rate float64) error {
return c.send(name, value, setSuffix, tags, rate)
return c.addMetric(metric{namespace: c.namespace(), globalTags: c.globalTags(), metricType: set, name: name, svalue: value, tags: tags, rate: rate})
}
// Timing sends timing information, it is an alias for TimeInMilliseconds
@ -413,19 +464,12 @@ func (c *Client) Timing(name string, value time.Duration, tags []string, rate fl
// TimeInMilliseconds sends timing information in milliseconds.
// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
return c.send(name, value, timingSuffix, tags, rate)
return c.addMetric(metric{namespace: c.namespace(), globalTags: c.globalTags(), metricType: timing, name: name, fvalue: value, tags: tags, rate: rate})
}
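For orientation, a minimal sketch of these methods from a consumer's point of view; the agent address, metric names, and tags below are illustrative, not part of this change:
client, err := statsd.New("127.0.0.1:8125") // UDP address of the dogstatsd agent
if err != nil {
	log.Fatal(err)
}
defer client.Close()
client.Incr("requests.received", []string{"env:dev"}, 1)
client.Gauge("queue.depth", 12, nil, 1)
client.TimeInMilliseconds("request.duration", 42.5, nil, 0.5) // sampled at 50%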
// Event sends the provided Event.
func (c *Client) Event(e *Event) error {
if c == nil {
return fmt.Errorf("Client is nil")
}
stat, err := e.Encode(c.Tags...)
if err != nil {
return err
}
return c.sendMsg([]byte(stat))
return c.addMetric(metric{globalTags: c.globalTags(), metricType: event, evalue: e, rate: 1})
}
// SimpleEvent sends an event with the provided title and text.
@ -436,14 +480,7 @@ func (c *Client) SimpleEvent(title, text string) error {
// ServiceCheck sends the provided ServiceCheck.
func (c *Client) ServiceCheck(sc *ServiceCheck) error {
if c == nil {
return fmt.Errorf("Client is nil")
}
stat, err := sc.Encode(c.Tags...)
if err != nil {
return err
}
return c.sendMsg([]byte(stat))
return c.addMetric(metric{globalTags: c.globalTags(), metricType: serviceCheck, scvalue: sc, rate: 1})
}
// SimpleServiceCheck sends a serviceCheck with the provided name and status.
@ -455,303 +492,12 @@ func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) erro
// Close the client connection.
func (c *Client) Close() error {
if c == nil {
return fmt.Errorf("Client is nil")
return ErrNoClient
}
select {
case c.stop <- struct{}{}:
default:
}
// if this client is buffered, flush before closing the writer
if c.bufferLength > 0 {
if err := c.Flush(); err != nil {
return err
}
}
return c.writer.Close()
}
// Events support
// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41
// They were exported so that client code can use the types directly.
// EventAlertType is the alert type for events
type EventAlertType string
const (
// Info is the "info" AlertType for events
Info EventAlertType = "info"
// Error is the "error" AlertType for events
Error EventAlertType = "error"
// Warning is the "warning" AlertType for events
Warning EventAlertType = "warning"
// Success is the "success" AlertType for events
Success EventAlertType = "success"
)
// EventPriority is the event priority for events
type EventPriority string
const (
// Normal is the "normal" Priority for events
Normal EventPriority = "normal"
// Low is the "low" Priority for events
Low EventPriority = "low"
)
// An Event is an object that can be posted to your DataDog event stream.
type Event struct {
// Title of the event. Required.
Title string
// Text is the description of the event. Required.
Text string
// Timestamp is a timestamp for the event. If not provided, the dogstatsd
// server will set this to the current time.
Timestamp time.Time
// Hostname for the event.
Hostname string
// AggregationKey groups this event with others of the same key.
AggregationKey string
// Priority of the event. Can be statsd.Low or statsd.Normal.
Priority EventPriority
// SourceTypeName is a source type for the event.
SourceTypeName string
// AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
// If absent, the default value applied by the dogstatsd server is Info.
AlertType EventAlertType
// Tags for the event.
Tags []string
}
// NewEvent creates a new event with the given title and text. Error checking
// against these values is done at send-time, or upon running e.Check.
func NewEvent(title, text string) *Event {
return &Event{
Title: title,
Text: text,
}
}
// Check verifies that an event is valid.
func (e Event) Check() error {
if len(e.Title) == 0 {
return fmt.Errorf("statsd.Event title is required")
}
if len(e.Text) == 0 {
return fmt.Errorf("statsd.Event text is required")
}
return nil
}
// Encode returns the dogstatsd wire protocol representation for an event.
// Tags may be passed which will be added to the encoded output but not to
// the Event's list of tags, e.g. for default tags.
func (e Event) Encode(tags ...string) (string, error) {
err := e.Check()
if err != nil {
return "", err
}
text := e.escapedText()
var buffer bytes.Buffer
buffer.WriteString("_e{")
buffer.WriteString(strconv.FormatInt(int64(len(e.Title)), 10))
buffer.WriteRune(',')
buffer.WriteString(strconv.FormatInt(int64(len(text)), 10))
buffer.WriteString("}:")
buffer.WriteString(e.Title)
buffer.WriteRune('|')
buffer.WriteString(text)
if !e.Timestamp.IsZero() {
buffer.WriteString("|d:")
buffer.WriteString(strconv.FormatInt(int64(e.Timestamp.Unix()), 10))
}
if len(e.Hostname) != 0 {
buffer.WriteString("|h:")
buffer.WriteString(e.Hostname)
}
if len(e.AggregationKey) != 0 {
buffer.WriteString("|k:")
buffer.WriteString(e.AggregationKey)
}
if len(e.Priority) != 0 {
buffer.WriteString("|p:")
buffer.WriteString(string(e.Priority))
}
if len(e.SourceTypeName) != 0 {
buffer.WriteString("|s:")
buffer.WriteString(e.SourceTypeName)
}
if len(e.AlertType) != 0 {
buffer.WriteString("|t:")
buffer.WriteString(string(e.AlertType))
}
writeTagString(&buffer, tags, e.Tags)
return buffer.String(), nil
}
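As a worked example of the format above, a bare event (no timestamp, host, or tags) encodes as the title and text byte lengths followed by the payload; this sketch is derived from the code, not upstream documentation:
e := NewEvent("deploy", "done")
s, _ := e.Encode()
// s == "_e{6,4}:deploy|done"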
// ServiceCheckStatus support
type ServiceCheckStatus byte
const (
// Ok is the "ok" ServiceCheck status
Ok ServiceCheckStatus = 0
// Warn is the "warning" ServiceCheck status
Warn ServiceCheckStatus = 1
// Critical is the "critical" ServiceCheck status
Critical ServiceCheckStatus = 2
// Unknown is the "unknown" ServiceCheck status
Unknown ServiceCheckStatus = 3
)
// A ServiceCheck is an object that contains the status of a DataDog service check.
type ServiceCheck struct {
// Name of the service check. Required.
Name string
// Status of service check. Required.
Status ServiceCheckStatus
// Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd
// server will set this to the current time.
Timestamp time.Time
// Hostname for the serviceCheck.
Hostname string
// A message describing the current state of the serviceCheck.
Message string
// Tags for the serviceCheck.
Tags []string
}
// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking
// against these values is done at send-time, or upon running sc.Check.
func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck {
return &ServiceCheck{
Name: name,
Status: status,
}
}
// Check verifies that a serviceCheck is valid.
func (sc ServiceCheck) Check() error {
if len(sc.Name) == 0 {
return fmt.Errorf("statsd.ServiceCheck name is required")
}
if sc.Status > Unknown {
return fmt.Errorf("statsd.ServiceCheck status has invalid value")
}
return nil
}
// Encode returns the dogstatsd wire protocol representation for a serviceCheck.
// Tags may be passed which will be added to the encoded output but not to
// the ServiceCheck's list of tags, e.g. for default tags.
func (sc ServiceCheck) Encode(tags ...string) (string, error) {
err := sc.Check()
if err != nil {
return "", err
}
message := sc.escapedMessage()
var buffer bytes.Buffer
buffer.WriteString("_sc|")
buffer.WriteString(sc.Name)
buffer.WriteRune('|')
buffer.WriteString(strconv.FormatInt(int64(sc.Status), 10))
if !sc.Timestamp.IsZero() {
buffer.WriteString("|d:")
buffer.WriteString(strconv.FormatInt(int64(sc.Timestamp.Unix()), 10))
}
if len(sc.Hostname) != 0 {
buffer.WriteString("|h:")
buffer.WriteString(sc.Hostname)
}
writeTagString(&buffer, tags, sc.Tags)
if len(message) != 0 {
buffer.WriteString("|m:")
buffer.WriteString(message)
}
return buffer.String(), nil
}
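Likewise for a minimal service check, derived from the code above (name is illustrative):
sc := NewServiceCheck("db.can_connect", Ok)
s, _ := sc.Encode()
// s == "_sc|db.can_connect|0"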
func (e Event) escapedText() string {
return strings.Replace(e.Text, "\n", "\\n", -1)
}
func (sc ServiceCheck) escapedMessage() string {
msg := strings.Replace(sc.Message, "\n", "\\n", -1)
return strings.Replace(msg, "m:", `m\:`, -1)
}
func removeNewlines(str string) string {
return strings.Replace(str, "\n", "", -1)
}
func writeTagString(w io.Writer, tagList1, tagList2 []string) {
// the tag lists may be shared with other callers, so we cannot modify
// them in any way (which means we cannot append to them either)
// therefore we must make an entirely separate copy just for this call
totalLen := len(tagList1) + len(tagList2)
if totalLen == 0 {
return
}
tags := make([]string, 0, totalLen)
tags = append(tags, tagList1...)
tags = append(tags, tagList2...)
io.WriteString(w, "|#")
io.WriteString(w, removeNewlines(tags[0]))
for _, tag := range tags[1:] {
io.WriteString(w, ",")
io.WriteString(w, removeNewlines(tag))
}
}
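For example, these tag helpers emit one "|#"-prefixed, comma-separated block combining both lists (values illustrative):
var buf bytes.Buffer
writeTagString(&buf, []string{"env:prod"}, []string{"region:us"})
// buf.String() == "|#env:prod,region:us"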
func appendTagString(buf []byte, tagList1, tagList2 []string) []byte {
if len(tagList1) == 0 {
if len(tagList2) == 0 {
return buf
}
tagList1 = tagList2
tagList2 = nil
}
buf = append(buf, "|#"...)
buf = appendWithoutNewlines(buf, tagList1[0])
for _, tag := range tagList1[1:] {
buf = append(buf, ',')
buf = appendWithoutNewlines(buf, tag)
}
for _, tag := range tagList2 {
buf = append(buf, ',')
buf = appendWithoutNewlines(buf, tag)
}
return buf
}
func appendWithoutNewlines(buf []byte, s string) []byte {
// fastpath for strings without newlines
if strings.IndexByte(s, '\n') == -1 {
return append(buf, s...)
}
for _, b := range []byte(s) {
if b != '\n' {
buf = append(buf, b)
}
}
return buf
c.Flush()
return c.sender.close()
}

View File

@ -1,6 +1,8 @@
package statsd
import (
"net"
"sync"
"time"
)
@ -9,3 +11,88 @@ UDSTimeout holds the default timeout for UDS socket writes, as they can get
blocking when the receiving buffer is full.
*/
const defaultUDSTimeout = 1 * time.Millisecond
// udsWriter is an internal type that manages the UDS connection
type udsWriter struct {
// Address to send metrics to, needed to allow reconnection on error
addr net.Addr
// Established connection object, or nil if not connected yet
conn net.Conn
// write timeout
writeTimeout time.Duration
sync.RWMutex // used to lock conn / writer can replace it
}
// newUDSWriter returns a pointer to a new udsWriter given a socket file path as addr.
func newUDSWriter(addr string) (*udsWriter, error) {
udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
if err != nil {
return nil, err
}
// Defer connection to first Write
writer := &udsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout}
return writer, nil
}
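A minimal sketch of constructing the writer; the socket path is illustrative, and dialing is deferred until the first Write:
w, err := newUDSWriter("/var/run/datadog/dsd.socket")
if err != nil {
	log.Fatal(err)
}
w.SetWriteTimeout(5 * time.Millisecond) // optional; defaults to defaultUDSTimeout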
// SetWriteTimeout allows the user to set a custom write timeout
func (w *udsWriter) SetWriteTimeout(d time.Duration) error {
w.writeTimeout = d
return nil
}
// Write data to the UDS connection with write timeout and minimal error handling:
// create the connection if nil, and destroy it if the statsd server has disconnected
func (w *udsWriter) Write(data []byte) (int, error) {
conn, err := w.ensureConnection()
if err != nil {
return 0, err
}
conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
n, e := conn.Write(data)
if err, isNetworkErr := e.(net.Error); e != nil && (!isNetworkErr || !err.Temporary()) {
// Statsd server disconnected, retry connecting at next packet
w.unsetConnection()
return 0, e
}
return n, e
}
func (w *udsWriter) Close() error {
if w.conn != nil {
return w.conn.Close()
}
return nil
}
func (w *udsWriter) ensureConnection() (net.Conn, error) {
// Check if we've already got a socket we can use
w.RLock()
currentConn := w.conn
w.RUnlock()
if currentConn != nil {
return currentConn, nil
}
// Looks like we might need to connect - try again with write locking.
w.Lock()
defer w.Unlock()
if w.conn != nil {
return w.conn, nil
}
newConn, err := net.Dial(w.addr.Network(), w.addr.String())
if err != nil {
return nil, err
}
w.conn = newConn
return newConn, nil
}
func (w *udsWriter) unsetConnection() {
w.Lock()
defer w.Unlock()
w.conn = nil
}

View File

@ -1,113 +0,0 @@
package statsd
import (
"fmt"
"net"
"time"
)
// asyncUdsWriter is an internal class wrapping around management of UDS connection
type asyncUdsWriter struct {
// Address to send metrics to, needed to allow reconnection on error
addr net.Addr
// Established connection object, or nil if not connected yet
conn net.Conn
// write timeout
writeTimeout time.Duration
// datagramQueue is the queue of datagrams ready to be sent
datagramQueue chan []byte
stopChan chan struct{}
}
// New returns a pointer to a new asyncUdsWriter given a socket file path as addr.
func newAsyncUdsWriter(addr string) (*asyncUdsWriter, error) {
udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
if err != nil {
return nil, err
}
writer := &asyncUdsWriter{
addr: udsAddr,
conn: nil,
writeTimeout: defaultUDSTimeout,
// 8192 * 8KB = 65.5MB
datagramQueue: make(chan []byte, 8192),
stopChan: make(chan struct{}, 1),
}
go writer.sendLoop()
return writer, nil
}
func (w *asyncUdsWriter) sendLoop() {
for {
select {
case datagram := <-w.datagramQueue:
w.write(datagram)
case <-w.stopChan:
return
}
}
}
// SetWriteTimeout allows the user to set a custom write timeout
func (w *asyncUdsWriter) SetWriteTimeout(d time.Duration) error {
w.writeTimeout = d
return nil
}
// Write data to the UDS connection with write timeout and minimal error handling:
// create the connection if nil, and destroy it if the statsd server has disconnected
func (w *asyncUdsWriter) Write(data []byte) (int, error) {
select {
case w.datagramQueue <- data:
return len(data), nil
default:
return 0, fmt.Errorf("uds datagram queue is full (the agent might not be able to keep up)")
}
}
// write writes the given data to the UDS.
// This function is **not** thread safe.
func (w *asyncUdsWriter) write(data []byte) (int, error) {
conn, err := w.ensureConnection()
if err != nil {
return 0, err
}
conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
n, err := conn.Write(data)
if e, isNetworkErr := err.(net.Error); !isNetworkErr || !e.Temporary() {
// err is not temporary, Statsd server disconnected, retry connecting at next packet
w.unsetConnection()
return 0, e
}
return n, err
}
func (w *asyncUdsWriter) Close() error {
close(w.stopChan)
if w.conn != nil {
return w.conn.Close()
}
return nil
}
func (w *asyncUdsWriter) ensureConnection() (net.Conn, error) {
if w.conn != nil {
return w.conn, nil
}
newConn, err := net.Dial(w.addr.Network(), w.addr.String())
if err != nil {
return nil, err
}
w.conn = newConn
return newConn, nil
}
func (w *asyncUdsWriter) unsetConnection() {
w.conn = nil
}

View File

@ -1,92 +0,0 @@
package statsd
import (
"net"
"sync"
"time"
)
// blockingUdsWriter is an internal class wrapping around management of UDS connection
type blockingUdsWriter struct {
// Address to send metrics to, needed to allow reconnection on error
addr net.Addr
// Established connection object, or nil if not connected yet
conn net.Conn
// write timeout
writeTimeout time.Duration
sync.RWMutex // used to lock conn / writer can replace it
}
// New returns a pointer to a new blockingUdsWriter given a socket file path as addr.
func newBlockingUdsWriter(addr string) (*blockingUdsWriter, error) {
udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
if err != nil {
return nil, err
}
// Defer connection to first Write
writer := &blockingUdsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout}
return writer, nil
}
// SetWriteTimeout allows the user to set a custom write timeout
func (w *blockingUdsWriter) SetWriteTimeout(d time.Duration) error {
w.writeTimeout = d
return nil
}
// Write data to the UDS connection with write timeout and minimal error handling:
// create the connection if nil, and destroy it if the statsd server has disconnected
func (w *blockingUdsWriter) Write(data []byte) (int, error) {
conn, err := w.ensureConnection()
if err != nil {
return 0, err
}
conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
n, e := conn.Write(data)
if err, isNetworkErr := e.(net.Error); !isNetworkErr || !err.Temporary() {
// Statsd server disconnected, retry connecting at next packet
w.unsetConnection()
return 0, e
}
return n, e
}
func (w *blockingUdsWriter) Close() error {
if w.conn != nil {
return w.conn.Close()
}
return nil
}
func (w *blockingUdsWriter) ensureConnection() (net.Conn, error) {
// Check if we've already got a socket we can use
w.RLock()
currentConn := w.conn
w.RUnlock()
if currentConn != nil {
return currentConn, nil
}
// Looks like we might need to connect - try again with write locking.
w.Lock()
defer w.Unlock()
if w.conn != nil {
return w.conn, nil
}
newConn, err := net.Dial(w.addr.Network(), w.addr.String())
if err != nil {
return nil, err
}
w.conn = newConn
return newConn, nil
}
func (w *blockingUdsWriter) unsetConnection() {
w.Lock()
defer w.Unlock()
w.conn = nil
}

View File

@ -3,14 +3,15 @@ module github.com/armon/go-metrics
go 1.12
require (
github.com/DataDog/datadog-go v2.2.0+incompatible
github.com/DataDog/datadog-go v3.2.0+incompatible
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible
github.com/circonus-labs/circonusllhist v0.1.3 // indirect
github.com/golang/protobuf v1.3.2
github.com/hashicorp/go-immutable-radix v1.0.0
github.com/hashicorp/go-retryablehttp v0.5.3 // indirect
github.com/pascaldekloe/goe v0.1.0
github.com/pkg/errors v0.8.1 // indirect
github.com/prometheus/client_golang v0.9.2
github.com/stretchr/testify v1.3.0 // indirect
github.com/prometheus/client_golang v1.4.0
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.9.1
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect
)

View File

@ -1,15 +1,39 @@
github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
@ -20,27 +44,82 @@ github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCS
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -10,6 +10,8 @@ import (
"time"
)
var spaceReplacer = strings.NewReplacer(" ", "_")
// InmemSink provides a MetricSink that does in-memory aggregation
// without sending metrics over a network. It can be embedded within
// an application to provide profiling information.
@ -312,36 +314,21 @@ func (i *InmemSink) getInterval() *IntervalMetrics {
// Flattens the key for formatting, removes spaces
func (i *InmemSink) flattenKey(parts []string) string {
buf := &bytes.Buffer{}
replacer := strings.NewReplacer(" ", "_")
if len(parts) > 0 {
replacer.WriteString(buf, parts[0])
}
for _, part := range parts[1:] {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, part)
}
joined := strings.Join(parts, ".")
spaceReplacer.WriteString(buf, joined)
return buf.String()
}
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
buf := &bytes.Buffer{}
replacer := strings.NewReplacer(" ", "_")
if len(parts) > 0 {
replacer.WriteString(buf, parts[0])
}
for _, part := range parts[1:] {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, part)
}
key := buf.String()
key := i.flattenKey(parts)
buf := bytes.NewBufferString(key)
for _, label := range labels {
replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
}
return buf.String(), key
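For concreteness, the flattened forms these helpers produce (values illustrative):
// flattenKey([]string{"consul", "http req"})        -> "consul.http_req"
// flattenKeyLabels([]string{"consul", "api"},
//     []Label{{Name: "method", Value: "GET"}})      -> ("consul.api;method=GET", "consul.api")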

View File

@ -269,10 +269,25 @@ func (m *Metrics) emitRuntimeStats() {
m.lastNumGC = num
}
// Inserts a string value at an index into the slice
// Creates a new slice with the provided string value as the first element
// and the provided slice values as the remaining values.
// Ordering of the values in the provided input slice is kept intact in the output slice.
func insert(i int, v string, s []string) []string {
s = append(s, "")
copy(s[i+1:], s[i:])
s[i] = v
return s
// Allocate new slice to avoid modifying the input slice
newS := make([]string, len(s)+1)
// Copy s[0, i-1] into newS
for j := 0; j < i; j++ {
newS[j] = s[j]
}
// Insert provided element at index i
newS[i] = v
// Copy s[i, len(s)-1] into newS starting at newS[i+1]
for j := i; j < len(s); j++ {
newS[j+1] = s[j]
}
return newS
}
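A quick example of the copy-on-insert behavior:
// insert(1, "x", []string{"a", "b"}) -> []string{"a", "x", "b"}
// The input slice []string{"a", "b"} is left unmodified.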

View File

@ -1,9 +1,10 @@
// +build go1.3
// +build go1.9
package prometheus
import (
"fmt"
"log"
"strings"
"sync"
"time"
@ -12,6 +13,7 @@ import (
"github.com/armon/go-metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
)
var (
@ -30,11 +32,11 @@ type PrometheusOpts struct {
}
type PrometheusSink struct {
mu sync.Mutex
gauges map[string]prometheus.Gauge
summaries map[string]prometheus.Summary
counters map[string]prometheus.Counter
updates map[string]time.Time
// If these will ever be copied, they should be converted to *sync.Map values and initialized appropriately
gauges sync.Map
summaries sync.Map
counters sync.Map
updates sync.Map
expiration time.Duration
}
@ -46,10 +48,10 @@ func NewPrometheusSink() (*PrometheusSink, error) {
// NewPrometheusSinkFrom creates a new PrometheusSink using the passed options.
func NewPrometheusSinkFrom(opts PrometheusOpts) (*PrometheusSink, error) {
sink := &PrometheusSink{
gauges: make(map[string]prometheus.Gauge),
summaries: make(map[string]prometheus.Summary),
counters: make(map[string]prometheus.Counter),
updates: make(map[string]time.Time),
gauges: sync.Map{},
summaries: sync.Map{},
counters: sync.Map{},
updates: sync.Map{},
expiration: opts.Expiration,
}
@ -67,38 +69,38 @@ func (p *PrometheusSink) Describe(c chan<- *prometheus.Desc) {
// logic to clean up ephemeral metrics if their values haven't been set for a
// duration exceeding our allowed expiration time.
func (p *PrometheusSink) Collect(c chan<- prometheus.Metric) {
p.mu.Lock()
defer p.mu.Unlock()
expire := p.expiration != 0
now := time.Now()
for k, v := range p.gauges {
last := p.updates[k]
if expire && last.Add(p.expiration).Before(now) {
delete(p.updates, k)
delete(p.gauges, k)
p.gauges.Range(func(k, v interface{}) bool {
last, _ := p.updates.Load(k)
if expire && last.(time.Time).Add(p.expiration).Before(now) {
p.updates.Delete(k)
p.gauges.Delete(k)
} else {
v.Collect(c)
v.(prometheus.Gauge).Collect(c)
}
}
for k, v := range p.summaries {
last := p.updates[k]
if expire && last.Add(p.expiration).Before(now) {
delete(p.updates, k)
delete(p.summaries, k)
return true
})
p.summaries.Range(func(k, v interface{}) bool {
last, _ := p.updates.Load(k)
if expire && last.(time.Time).Add(p.expiration).Before(now) {
p.updates.Delete(k)
p.summaries.Delete(k)
} else {
v.Collect(c)
v.(prometheus.Summary).Collect(c)
}
}
for k, v := range p.counters {
last := p.updates[k]
if expire && last.Add(p.expiration).Before(now) {
delete(p.updates, k)
delete(p.counters, k)
return true
})
p.counters.Range(func(k, v interface{}) bool {
last, _ := p.updates.Load(k)
if expire && last.(time.Time).Add(p.expiration).Before(now) {
p.updates.Delete(k)
p.counters.Delete(k)
} else {
v.Collect(c)
v.(prometheus.Counter).Collect(c)
}
}
return true
})
}
var forbiddenChars = regexp.MustCompile("[ .=\\-/]")
@ -128,20 +130,18 @@ func (p *PrometheusSink) SetGauge(parts []string, val float32) {
}
func (p *PrometheusSink) SetGaugeWithLabels(parts []string, val float32, labels []metrics.Label) {
p.mu.Lock()
defer p.mu.Unlock()
key, hash := p.flattenKey(parts, labels)
g, ok := p.gauges[hash]
g, ok := p.gauges.Load(hash)
if !ok {
g = prometheus.NewGauge(prometheus.GaugeOpts{
Name: key,
Help: key,
ConstLabels: prometheusLabels(labels),
})
p.gauges[hash] = g
p.gauges.Store(hash, g)
}
g.Set(float64(val))
p.updates[hash] = time.Now()
g.(prometheus.Gauge).Set(float64(val))
p.updates.Store(hash, time.Now())
}
func (p *PrometheusSink) AddSample(parts []string, val float32) {
@ -149,21 +149,20 @@ func (p *PrometheusSink) AddSample(parts []string, val float32) {
}
func (p *PrometheusSink) AddSampleWithLabels(parts []string, val float32, labels []metrics.Label) {
p.mu.Lock()
defer p.mu.Unlock()
key, hash := p.flattenKey(parts, labels)
g, ok := p.summaries[hash]
g, ok := p.summaries.Load(hash)
if !ok {
g = prometheus.NewSummary(prometheus.SummaryOpts{
Name: key,
Help: key,
MaxAge: 10 * time.Second,
ConstLabels: prometheusLabels(labels),
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})
p.summaries[hash] = g
p.summaries.Store(hash, g)
}
g.Observe(float64(val))
p.updates[hash] = time.Now()
g.(prometheus.Summary).Observe(float64(val))
p.updates.Store(hash, time.Now())
}
// EmitKey is not implemented. Prometheus doesn't offer a type for which an
@ -177,18 +176,71 @@ func (p *PrometheusSink) IncrCounter(parts []string, val float32) {
}
func (p *PrometheusSink) IncrCounterWithLabels(parts []string, val float32, labels []metrics.Label) {
p.mu.Lock()
defer p.mu.Unlock()
key, hash := p.flattenKey(parts, labels)
g, ok := p.counters[hash]
g, ok := p.counters.Load(hash)
if !ok {
g = prometheus.NewCounter(prometheus.CounterOpts{
Name: key,
Help: key,
ConstLabels: prometheusLabels(labels),
})
p.counters[hash] = g
p.counters.Store(hash, g)
}
g.Add(float64(val))
p.updates[hash] = time.Now()
g.(prometheus.Counter).Add(float64(val))
p.updates.Store(hash, time.Now())
}
type PrometheusPushSink struct {
*PrometheusSink
pusher *push.Pusher
address string
pushInterval time.Duration
stopChan chan struct{}
}
func NewPrometheusPushSink(address string, pushInterval time.Duration, name string) (*PrometheusPushSink, error) {
promSink := &PrometheusSink{
gauges: sync.Map{},
summaries: sync.Map{},
counters: sync.Map{},
updates: sync.Map{},
expiration: 60 * time.Second,
}
pusher := push.New(address, name).Collector(promSink)
sink := &PrometheusPushSink{
promSink,
pusher,
address,
pushInterval,
make(chan struct{}),
}
sink.flushMetrics()
return sink, nil
}
func (s *PrometheusPushSink) flushMetrics() {
ticker := time.NewTicker(s.pushInterval)
go func() {
for {
select {
case <-ticker.C:
err := s.pusher.Push()
if err != nil {
log.Printf("[ERR] Error pushing to Prometheus! Err: %s", err)
}
case <-s.stopChan:
ticker.Stop()
return
}
}
}()
}
func (s *PrometheusPushSink) Shutdown() {
close(s.stopChan)
}
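A minimal, hypothetical wiring of the push sink; the pushgateway address and service name are illustrative:
sink, err := NewPrometheusPushSink("http://localhost:9091", 10*time.Second, "my-service")
if err != nil {
	log.Fatal(err)
}
defer sink.Shutdown()
if _, err := metrics.NewGlobal(metrics.DefaultConfig("my-service"), sink); err != nil {
	log.Fatal(err)
}
metrics.IncrCounter([]string{"requests"}, 1)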

8
vendor/github.com/cespare/xxhash/v2/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,8 @@
language: go
go:
- "1.x"
- master
env:
- TAGS=""
- TAGS="-tags purego"
script: go test $TAGS -v ./...

22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored Normal file
View File

@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

67
vendor/github.com/cespare/xxhash/v2/README.md generated vendored Normal file
View File

@ -0,0 +1,67 @@
# xxhash
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
This package provides a straightforward API:
```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```
The `Digest` type implements hash.Hash64. Its key methods are:
```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
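A quick sketch of typical use (Write and WriteString always return a nil error):
```
fmt.Println(xxhash.Sum64([]byte("hello")))

d := xxhash.New()
d.WriteString("hello")
fmt.Println(d.Sum64())
```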
The package provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.
## Compatibility
This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:
* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later
I recommend using the latest release of Go.
## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:
```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```
## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [FreeCache](https://github.com/coocood/freecache)

3
vendor/github.com/cespare/xxhash/v2/go.mod generated vendored Normal file
View File

@ -0,0 +1,3 @@
module github.com/cespare/xxhash/v2
go 1.11

0
vendor/github.com/cespare/xxhash/v2/go.sum generated vendored Normal file
View File

236
vendor/github.com/cespare/xxhash/v2/xxhash.go generated vendored Normal file
View File

@ -0,0 +1,236 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash
import (
"encoding/binary"
"errors"
"math/bits"
)
const (
prime1 uint64 = 11400714785074694791
prime2 uint64 = 14029467366897019727
prime3 uint64 = 1609587929392839161
prime4 uint64 = 9650029242287828579
prime5 uint64 = 2870177450012600261
)
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Digest implements hash.Hash64.
type Digest struct {
v1 uint64
v2 uint64
v3 uint64
v4 uint64
total uint64
mem [32]byte
n int // how much of mem is used
}
// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
var d Digest
d.Reset()
return &d
}
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
d.v1 = prime1v + prime2
d.v2 = prime2
d.v3 = 0
d.v4 = -prime1v
d.total = 0
d.n = 0
}
// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }
// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }
// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
if d.n+n < 32 {
// This new data doesn't even fill the current block.
copy(d.mem[d.n:], b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
copy(d.mem[d.n:], b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:]
d.n = 0
}
if len(b) >= 32 {
// One or more full blocks left.
nw := writeBlocks(d, b)
b = b[nw:]
}
// Store any remaining partial block.
copy(d.mem[:], b)
d.n = len(b)
return
}
// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
s := d.Sum64()
return append(
b,
byte(s>>56),
byte(s>>48),
byte(s>>40),
byte(s>>32),
byte(s>>24),
byte(s>>16),
byte(s>>8),
byte(s),
)
}
// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
var h uint64
if d.total >= 32 {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = d.v3 + prime5
}
h += d.total
i, end := 0, d.n
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(d.mem[i:i+8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(d.mem[i:i+4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for i < end {
h ^= uint64(d.mem[i]) * prime5
h = rol11(h) * prime1
i++
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
const (
magic = "xxh\x06"
marshaledSize = len(magic) + 8*5 + 32
)
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaledSize)
b = append(b, magic...)
b = appendUint64(b, d.v1)
b = appendUint64(b, d.v2)
b = appendUint64(b, d.v3)
b = appendUint64(b, d.v4)
b = appendUint64(b, d.total)
b = append(b, d.mem[:d.n]...)
b = b[:len(b)+len(d.mem)-d.n]
return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
return errors.New("xxhash: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("xxhash: invalid hash state size")
}
b = b[len(magic):]
b, d.v1 = consumeUint64(b)
b, d.v2 = consumeUint64(b)
b, d.v3 = consumeUint64(b)
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
b = b[len(d.mem):]
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
func appendUint64(b []byte, x uint64) []byte {
var a [8]byte
binary.LittleEndian.PutUint64(a[:], x)
return append(b, a[:]...)
}
func consumeUint64(b []byte) ([]byte, uint64) {
x := u64(b)
return b[8:], x
}
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
func round(acc, input uint64) uint64 {
acc += input * prime2
acc = rol31(acc)
acc *= prime1
return acc
}
func mergeRound(acc, val uint64) uint64 {
val = round(0, val)
acc ^= val
acc = acc*prime1 + prime4
return acc
}
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }

13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
// +build !appengine
// +build gc
// +build !purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64
//go:noescape
func writeBlocks(d *Digest, b []byte) int

215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored Normal file
View File

@ -0,0 +1,215 @@
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
// Register allocation:
// AX h
// CX pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
// R9 v2
// R10 v3
// R11 v4
// R12 tmp
// R13 prime1v
// R14 prime2v
// R15 prime4v
// round reads from and advances the buffer pointer in CX.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (CX), R12 \
ADDQ $8, CX \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ R15, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), R15
// Load slice.
MOVQ b_base+0(FP), CX
MOVQ b_len+8(FP), DX
LEAQ (CX)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
// Check whether we have at least one block.
CMPQ DX, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8
ADDQ R14, R8
MOVQ R14, R9
XORQ R10, R10
XORQ R11, R11
SUBQ R13, R11
// Loop until CX > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ CX, BX
JLE blockLoop
MOVQ R8, AX
ROLQ $1, AX
MOVQ R9, R12
ROLQ $7, R12
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
JMP afterBlocks
noBlocks:
MOVQ ·prime5v(SB), AX
afterBlocks:
ADDQ DX, AX
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
ADDQ $24, BX
CMPQ CX, BX
JG fourByte
wordLoop:
// Calculate k1.
MOVQ (CX), R8
ADDQ $8, CX
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ R15, AX
CMPQ CX, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
CMPQ CX, BX
JG singles
MOVL (CX), R8
ADDQ $4, CX
IMULQ R13, R8
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ CX, BX
JGE finalize
singlesLoop:
MOVBQZX (CX), R12
ADDQ $1, CX
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
CMPQ CX, BX
JL singlesLoop
finalize:
MOVQ AX, R12
SHRQ $33, R12
XORQ R12, AX
IMULQ R14, AX
MOVQ AX, R12
SHRQ $29, R12
XORQ R12, AX
IMULQ ·prime3v(SB), AX
MOVQ AX, R12
SHRQ $32, R12
XORQ R12, AX
MOVQ AX, ret+24(FP)
RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
// Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
// Load slice.
MOVQ b_base+8(FP), CX
MOVQ b_len+16(FP), DX
LEAQ (CX)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
MOVQ 24(AX), R11 // v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ CX, BX
JLE blockLoop
// Copy vN back to d.
MOVQ R8, 0(AX)
MOVQ R9, 8(AX)
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
// The number of bytes written is CX minus the old base pointer.
SUBQ b_base+8(FP), CX
MOVQ CX, ret+32(FP)
RET

76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
// +build !amd64 appengine !gc purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
// d.Write(b)
// return d.Sum64()
// but this is faster, particularly for small inputs.
n := len(b)
var h uint64
if n >= 32 {
v1 := prime1v + prime2
v2 := prime2
v3 := uint64(0)
v4 := -prime1v
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = prime5
}
h += uint64(n)
i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
h = rol11(h) * prime1
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
func writeBlocks(d *Digest, b []byte) int {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
n := len(b)
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
return n - len(b)
}
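
For orientation, a minimal sketch of how this vendored package is consumed from Go, using only the exported API shown in this diff (Sum64, New, Digest.Write, Digest.Sum64); the input strings are illustrative:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Printf("%x\n", xxhash.Sum64([]byte("hello")))

	// Streaming: feed data incrementally through a Digest.
	d := xxhash.New()
	d.Write([]byte("hel"))
	d.Write([]byte("lo"))
	fmt.Printf("%x\n", d.Sum64()) // same value as the one-shot call
}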

15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
package xxhash
// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
return d.Write([]byte(s))
}

46
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
// +build !appengine
// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.
package xxhash
import (
"reflect"
"unsafe"
)
// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
var b []byte
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
bh.Len = len(s)
bh.Cap = len(s)
return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
var b []byte
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
bh.Len = len(s)
bh.Cap = len(s)
return d.Write(b)
}

View File

@ -1,3 +0,0 @@
language: go
go:
- tip

View File

@ -1,4 +1,4 @@
# 1.1.0 (May 22nd, 2019)
# UNRELEASED
FEATURES

View File

@ -1,4 +1,4 @@
go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix)
go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
=========
Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).

View File

@ -86,6 +86,20 @@ func (t *Tree) Txn() *Txn {
return txn
}
// Clone makes an independent copy of the transaction. The new transaction
// does not track any nodes and has TrackMutate turned off. The cloned
// transaction will contain any uncommitted writes in the original
// transaction, but further mutations to either will be independent and
// result in different radix trees on Commit. A cloned transaction may be
// passed to another goroutine and mutated there independently, however
// each transaction may only be mutated within a single goroutine.
func (t *Txn) Clone() *Txn {
// reset the writable node cache to avoid leaking future writes into the clone
t.writable = nil
txn := &Txn{
root: t.root,
snap: t.snap,
size: t.size,
}
return txn
}
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
// then notifications will be issued for affected internal nodes and leaves when
// the transaction is committed.
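
A short usage sketch for the new Clone method; everything below uses existing iradix API (New, Txn, Insert, Commit, Len), and the keys and values are illustrative:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	txn := iradix.New().Txn()
	txn.Insert([]byte("a"), 1)

	// The clone sees the uncommitted write, but further mutations
	// to either transaction are independent.
	clone := txn.Clone()
	clone.Insert([]byte("b"), 2)

	t1 := txn.Commit()              // contains "a"
	t2 := clone.Commit()            // contains "a" and "b"
	fmt.Println(t1.Len(), t2.Len()) // 1 2
}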

View File

@ -74,6 +74,7 @@ type Iterator struct {
buf []byte
head int
tail int
depth int
captureStartedAt int
captured []byte
Error error
@ -88,6 +89,7 @@ func NewIterator(cfg API) *Iterator {
buf: nil,
head: 0,
tail: 0,
depth: 0,
}
}
@ -99,6 +101,7 @@ func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
buf: make([]byte, bufSize),
head: 0,
tail: 0,
depth: 0,
}
}
@ -110,6 +113,7 @@ func ParseBytes(cfg API, input []byte) *Iterator {
buf: input,
head: 0,
tail: len(input),
depth: 0,
}
}
@ -128,6 +132,7 @@ func (iter *Iterator) Reset(reader io.Reader) *Iterator {
iter.reader = reader
iter.head = 0
iter.tail = 0
iter.depth = 0
return iter
}
@ -137,6 +142,7 @@ func (iter *Iterator) ResetBytes(input []byte) *Iterator {
iter.buf = input
iter.head = 0
iter.tail = len(input)
iter.depth = 0
return iter
}
@ -320,3 +326,24 @@ func (iter *Iterator) Read() interface{} {
return nil
}
}
// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9
const maxDepth = 10000
func (iter *Iterator) incrementDepth() (success bool) {
iter.depth++
if iter.depth <= maxDepth {
return true
}
iter.ReportError("incrementDepth", "exceeded max depth")
return false
}
func (iter *Iterator) decrementDepth() (success bool) {
iter.depth--
if iter.depth >= 0 {
return true
}
iter.ReportError("decrementDepth", "unexpected negative nesting")
return false
}
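
To make the guard concrete, a minimal sketch that trips the new limit through the package-level jsoniter.Unmarshal entry point (the 10001 is simply one level past maxDepth):

package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// 10001 nested arrays: one level past maxDepth (10000).
	input := strings.Repeat("[", 10001) + strings.Repeat("]", 10001)

	var v interface{}
	err := jsoniter.Unmarshal([]byte(input), &v)
	fmt.Println(err) // reports "exceeded max depth"
}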

View File

@ -28,26 +28,32 @@ func (iter *Iterator) ReadArray() (ret bool) {
func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
c := iter.nextToken()
if c == '[' {
if !iter.incrementDepth() {
return false
}
c = iter.nextToken()
if c != ']' {
iter.unreadByte()
if !callback(iter) {
iter.decrementDepth()
return false
}
c = iter.nextToken()
for c == ',' {
if !callback(iter) {
iter.decrementDepth()
return false
}
c = iter.nextToken()
}
if c != ']' {
iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
iter.decrementDepth()
return false
}
return true
return iter.decrementDepth()
}
return true
return iter.decrementDepth()
}
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')

View File

@ -112,6 +112,9 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken()
var field string
if c == '{' {
if !iter.incrementDepth() {
return false
}
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
@ -121,6 +124,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
if !callback(iter, field) {
iter.decrementDepth()
return false
}
c = iter.nextToken()
@ -131,20 +135,23 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
if !callback(iter, field) {
iter.decrementDepth()
return false
}
c = iter.nextToken()
}
if c != '}' {
iter.ReportError("ReadObjectCB", `object not ended with }`)
iter.decrementDepth()
return false
}
return true
return iter.decrementDepth()
}
if c == '}' {
return true
return iter.decrementDepth()
}
iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
iter.decrementDepth()
return false
}
if c == 'n' {
@ -159,15 +166,20 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken()
if c == '{' {
if !iter.incrementDepth() {
return false
}
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
field := iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
iter.decrementDepth()
return false
}
if !callback(iter, field) {
iter.decrementDepth()
return false
}
c = iter.nextToken()
@ -175,23 +187,27 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
field = iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
iter.decrementDepth()
return false
}
if !callback(iter, field) {
iter.decrementDepth()
return false
}
c = iter.nextToken()
}
if c != '}' {
iter.ReportError("ReadMapCB", `object not ended with }`)
iter.decrementDepth()
return false
}
return true
return iter.decrementDepth()
}
if c == '}' {
return true
return iter.decrementDepth()
}
iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
iter.decrementDepth()
return false
}
if c == 'n' {

View File

@ -22,6 +22,9 @@ func (iter *Iterator) skipNumber() {
func (iter *Iterator) skipArray() {
level := 1
if !iter.incrementDepth() {
return
}
for {
for i := iter.head; i < iter.tail; i++ {
switch iter.buf[i] {
@ -31,8 +34,14 @@ func (iter *Iterator) skipArray() {
i = iter.head - 1 // it will be i++ soon
case '[': // If open symbol, increase level
level++
if !iter.incrementDepth() {
return
}
case ']': // If close symbol, decrease level
level--
if !iter.decrementDepth() {
return
}
// If we have returned to the original level, we're done
if level == 0 {
@ -50,6 +59,10 @@ func (iter *Iterator) skipArray() {
func (iter *Iterator) skipObject() {
level := 1
if !iter.incrementDepth() {
return
}
for {
for i := iter.head; i < iter.tail; i++ {
switch iter.buf[i] {
@ -59,8 +72,14 @@ func (iter *Iterator) skipObject() {
i = iter.head - 1 // it will be i++ soon
case '{': // If open symbol, increase level
level++
if !iter.incrementDepth() {
return
}
case '}': // If close symbol, decrease level
level--
if !iter.decrementDepth() {
return
}
// If we have returned to the original level, we're done
if level == 0 {

View File

@ -60,6 +60,7 @@ func (b *ctx) append(prefix string) *ctx {
// ReadVal copies the underlying JSON into a Go interface, same as json.Unmarshal
func (iter *Iterator) ReadVal(obj interface{}) {
depth := iter.depth
cacheKey := reflect2.RTypeOf(obj)
decoder := iter.cfg.getDecoderFromCache(cacheKey)
if decoder == nil {
@ -76,6 +77,10 @@ func (iter *Iterator) ReadVal(obj interface{}) {
return
}
decoder.Decode(ptr, iter)
if iter.depth != depth {
iter.ReportError("ReadVal", "unexpected mismatched nesting")
return
}
}
// WriteVal copies the Go interface into the underlying JSON, same as json.Marshal
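
For reference, a minimal ReadVal sketch, the path the new nesting check protects; ParseString and ConfigDefault are existing jsoniter entry points, and the input is illustrative:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"a": 1, "b": 2}`)

	var m map[string]int
	iter.ReadVal(&m) // decodes into m, mirroring json.Unmarshal
	fmt.Println(m["a"], m["b"])
}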

View File

@ -341,10 +341,10 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
continue
}
tagParts := strings.Split(tag, ",")
if tag == "-" {
if tag == "-" || field.Name() == "_" {
continue
}
tagParts := strings.Split(tag, ",")
if field.Anonymous() && (tag == "" || tagParts[0] == "") {
if field.Type().Kind() == reflect.Struct {
structDescriptor := describeStruct(ctx, field.Type())

View File

@ -249,6 +249,10 @@ type mapEncoder struct {
}
func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if *(*unsafe.Pointer)(ptr) == nil {
stream.WriteNil()
return
}
stream.WriteObjectStart()
iter := encoder.mapType.UnsafeIterate(ptr)
for i := 0; iter.HasNext(); i++ {
@ -286,16 +290,17 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteObjectStart()
mapIter := encoder.mapType.UnsafeIterate(ptr)
subStream := stream.cfg.BorrowStream(nil)
subStream.Attachment = stream.Attachment
subIter := stream.cfg.BorrowIterator(nil)
keyValues := encodedKeyValues{}
for mapIter.HasNext() {
subStream.buf = make([]byte, 0, 64)
key, elem := mapIter.UnsafeNext()
subStreamIndex := subStream.Buffered()
encoder.keyEncoder.Encode(key, subStream)
if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
stream.Error = subStream.Error
}
encodedKey := subStream.Buffer()
encodedKey := subStream.Buffer()[subStreamIndex:]
subIter.ResetBytes(encodedKey)
decodedKey := subIter.ReadString()
if stream.indention > 0 {
@ -306,7 +311,7 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.elemEncoder.Encode(elem, subStream)
keyValues = append(keyValues, encodedKV{
key: decodedKey,
keyValue: subStream.Buffer(),
keyValue: subStream.Buffer()[subStreamIndex:],
})
}
sort.Sort(keyValues)
@ -316,6 +321,9 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
}
stream.Write(keyValue.keyValue)
}
if subStream.Error != nil && stream.Error == nil {
stream.Error = subStream.Error
}
stream.WriteObjectEnd()
stream.cfg.ReturnStream(subStream)
stream.cfg.ReturnIterator(subIter)

View File

@ -3,8 +3,9 @@ package jsoniter
import (
"encoding"
"encoding/json"
"github.com/modern-go/reflect2"
"unsafe"
"github.com/modern-go/reflect2"
)
var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
@ -93,10 +94,17 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteNil()
return
}
bytes, err := json.Marshal(obj)
marshaler := obj.(json.Marshaler)
bytes, err := marshaler.MarshalJSON()
if err != nil {
stream.Error = err
} else {
// html escape was already done by jsoniter
// but the extra '\n' should be trimmed
l := len(bytes)
if l > 0 && bytes[l-1] == '\n' {
bytes = bytes[:l-1]
}
stream.Write(bytes)
}
}
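
To see the trimming in action, a small sketch: the noisyMarshaler type is hypothetical, its MarshalJSON ends with '\n' as json.Encoder output does, and the encoder above strips that trailing byte:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// noisyMarshaler's MarshalJSON ends with '\n', as json.Encoder output does.
type noisyMarshaler struct{}

func (noisyMarshaler) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	json.NewEncoder(&buf).Encode(map[string]int{"a": 1})
	return buf.Bytes(), nil
}

func main() {
	out, _ := jsoniter.Marshal(noisyMarshaler{})
	fmt.Printf("%q\n", out) // trailing '\n' trimmed by the encoder
}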

View File

@ -500,6 +500,9 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
var c byte
for c = ','; c == ','; c = iter.nextToken() {
decoder.decodeOneField(ptr, iter)
@ -510,6 +513,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if c != '}' {
iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
}
iter.decrementDepth()
}
func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
@ -571,6 +575,9 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
if iter.readFieldHash() == decoder.fieldHash {
decoder.fieldDecoder.Decode(ptr, iter)
@ -584,6 +591,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type twoFieldsStructDecoder struct {
@ -598,6 +606,9 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@ -614,6 +625,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type threeFieldsStructDecoder struct {
@ -630,6 +642,9 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@ -648,6 +663,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type fourFieldsStructDecoder struct {
@ -666,6 +682,9 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@ -686,6 +705,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type fiveFieldsStructDecoder struct {
@ -706,6 +726,9 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@ -728,6 +751,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type sixFieldsStructDecoder struct {
@ -750,6 +774,9 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@ -774,6 +801,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type sevenFieldsStructDecoder struct {
@ -798,6 +826,9 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@ -824,6 +855,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type eightFieldsStructDecoder struct {
@ -850,6 +882,9 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@ -878,6 +913,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type nineFieldsStructDecoder struct {
@ -906,6 +942,9 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@ -936,6 +975,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type tenFieldsStructDecoder struct {
@ -966,6 +1006,9 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if !iter.readObjectStart() {
return
}
if !iter.incrementDepth() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@ -998,6 +1041,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
}
type structFieldDecoder struct {

View File

@ -200,6 +200,7 @@ type stringModeStringEncoder struct {
func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
tempStream := encoder.cfg.BorrowStream(nil)
tempStream.Attachment = stream.Attachment
defer encoder.cfg.ReturnStream(tempStream)
encoder.elemEncoder.Encode(ptr, tempStream)
stream.WriteString(string(tempStream.Buffer()))

View File

@ -17,6 +17,7 @@ import (
"errors"
"math"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
)
@ -42,11 +43,27 @@ type Counter interface {
Add(float64)
}
// ExemplarAdder is implemented by Counters that offer the option of adding a
// value to the Counter together with an exemplar. Its AddWithExemplar method
// works like the Add method of the Counter interface but also replaces the
// currently saved exemplar (if any) with a new one, created from the provided
// value, the current time as timestamp, and the provided labels. Empty Labels
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
// than 64 runes in total.
type ExemplarAdder interface {
AddWithExemplar(value float64, exemplar Labels)
}
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
// perform the corresponding type assertion.
//
// The returned implementation tracks the counter value in two separate
// variables, a float64 and a uint64. The latter is used to track calls of the
// Inc method and calls of the Add method with a value that can be represented
@ -61,7 +78,7 @@ func NewCounter(opts CounterOpts) Counter {
nil,
opts.ConstLabels,
)
result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
result.init(result) // Init self-collection.
return result
}
@ -78,6 +95,9 @@ type counter struct {
desc *Desc
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
}
func (c *counter) Desc() *Desc {
@ -88,6 +108,7 @@ func (c *counter) Add(v float64) {
if v < 0 {
panic(errors.New("counter cannot decrease in value"))
}
ival := uint64(v)
if float64(ival) == v {
atomic.AddUint64(&c.valInt, ival)
@ -103,6 +124,11 @@ func (c *counter) Add(v float64) {
}
}
func (c *counter) AddWithExemplar(v float64, e Labels) {
c.Add(v)
c.updateExemplar(v, e)
}
func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
@ -112,7 +138,23 @@ func (c *counter) Write(out *dto.Metric) error {
ival := atomic.LoadUint64(&c.valInt)
val := fval + float64(ival)
return populateMetric(CounterValue, val, c.labelPairs, out)
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
exemplar = e.(*dto.Exemplar)
}
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
}
func (c *counter) updateExemplar(v float64, l Labels) {
if l == nil {
return
}
e, err := newExemplar(v, c.now(), l)
if err != nil {
panic(err)
}
c.exemplar.Store(e)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
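
A brief sketch of the new exemplar support on counters; the metric name and trace_id label are illustrative, and the type assertion is the documented way to reach AddWithExemplar:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests.",
	})
	prometheus.MustRegister(c)

	// NewCounter's result also implements ExemplarAdder, so the
	// assertion is documented to be safe.
	c.(prometheus.ExemplarAdder).AddWithExemplar(1, prometheus.Labels{
		"trace_id": "abc123",
	})
}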

View File

@ -19,6 +19,7 @@ import (
"sort"
"strings"
"github.com/cespare/xxhash/v2"
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
@ -126,24 +127,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
return d
}
vh := hashNew()
xxh := xxhash.New()
for _, val := range labelValues {
vh = hashAdd(vh, val)
vh = hashAddByte(vh, separatorByte)
xxh.WriteString(val)
xxh.Write(separatorByteSlice)
}
d.id = vh
d.id = xxh.Sum64()
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
lh := hashNew()
lh = hashAdd(lh, help)
lh = hashAddByte(lh, separatorByte)
xxh.Reset()
xxh.WriteString(help)
xxh.Write(separatorByteSlice)
for _, labelName := range labelNames {
lh = hashAdd(lh, labelName)
lh = hashAddByte(lh, separatorByte)
xxh.WriteString(labelName)
xxh.Write(separatorByteSlice)
}
d.dimHash = lh
d.dimHash = xxh.Sum64()
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {

View File

@ -123,7 +123,7 @@ func (g *gauge) Sub(val float64) {
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
return populateMetric(GaugeValue, val, g.labelPairs, out)
return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@ -273,9 +273,12 @@ type GaugeFunc interface {
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
// value reported is determined by calling the given function from within the
// Write method. Take into account that metric collection may happen
// concurrently. If that results in concurrent calls to Write, like in the case
// where a GaugeFunc is directly registered with Prometheus, the provided
// function must be concurrency-safe.
// concurrently. Therefore, it must be safe to call the provided function
// concurrently.
//
// NewGaugeFunc is a good way to create an “info” style metric with a constant
// value of 1. Example:
// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
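
As a concrete instance of the “info” pattern mentioned above, a minimal sketch (metric name and label values are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	buildInfo := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name:        "app_build_info",
		Help:        "Build information, constant 1.",
		ConstLabels: prometheus.Labels{"version": "1.2.3"},
	}, func() float64 { return 1 })
	prometheus.MustRegister(buildInfo)
}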

View File

@ -73,7 +73,7 @@ func NewGoCollector() Collector {
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the GC invocation durations.",
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",

View File

@ -20,6 +20,7 @@ import (
"sort"
"sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
@ -138,7 +139,7 @@ type HistogramOpts struct {
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
// Buckets defines the buckets into which observations are counted. Each
@ -151,6 +152,10 @@ type HistogramOpts struct {
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
//
// The returned implementation also implements ExemplarObserver. It is safe to
// perform the corresponding type assertion. Exemplars are tracked separately
// for each bucket.
func NewHistogram(opts HistogramOpts) Histogram {
return newHistogram(
NewDesc(
@ -187,7 +192,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
counts: [2]*histogramCounts{{}, {}},
now: time.Now,
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@ -205,9 +211,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts:
// for both counts as well as exemplars:
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
return h
@ -254,6 +261,9 @@ type histogram struct {
upperBounds []float64
labelPairs []*dto.LabelPair
exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
}
func (h *histogram) Desc() *Desc {
@ -261,36 +271,13 @@ func (h *histogram) Desc() *Desc {
}
func (h *histogram) Observe(v float64) {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v)
h.observe(v, h.findBucket(v))
}
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
if i < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[i], 1)
}
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
h.updateExemplar(v, i, e)
}
func (h *histogram) Write(out *dto.Metric) error {
@ -329,6 +316,18 @@ func (h *histogram) Write(out *dto.Metric) error {
CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound),
}
if e := h.exemplars[i].Load(); e != nil {
his.Bucket[i].Exemplar = e.(*dto.Exemplar)
}
}
// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
b := &dto.Bucket{
CumulativeCount: proto.Uint64(count),
UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e.(*dto.Exemplar),
}
his.Bucket = append(his.Bucket, b)
}
out.Histogram = his
@ -352,6 +351,57 @@ func (h *histogram) Write(out *dto.Metric) error {
return nil
}
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
return sort.SearchFloat64s(h.upperBounds, v)
}
// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
if bucket < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[bucket], 1)
}
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
}
// updateExemplar replaces the exemplar for the provided bucket. With empty
// labels, it's a no-op. It panics if any of the labels is invalid.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
}
e, err := newExemplar(v, h.now(), l)
if err != nil {
panic(err)
}
h.exemplars[bucket].Store(e)
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions

View File

@ -18,11 +18,12 @@ import (
"time"
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
const separatorByte byte = 255
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementations of Metric in this package are Gauge, Counter,

View File

@ -50,3 +50,15 @@ type ObserverVec interface {
Collector
}
// ExemplarObserver is implemented by Observers that offer the option of
// observing a value together with an exemplar. Its ObserveWithExemplar method
// works like the Observe method of an Observer but also replaces the currently
// saved exemplar (if any) with a new one, created from the provided value, the
// current time as timestamp, and the provided Labels. Empty Labels will lead to
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
// left in place. ObserveWithExemplar panics if any of the provided labels are
// invalid or if the provided labels contain more than 64 runes in total.
type ExemplarObserver interface {
ObserveWithExemplar(value float64, exemplar Labels)
}
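
A matching sketch for observers; NewHistogram's return value implements ExemplarObserver per its doc comment earlier in this change, and the names below are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Request latency.",
		Buckets: prometheus.DefBuckets,
	})
	prometheus.MustRegister(h)

	// Attach an exemplar to whichever bucket 0.42 falls into.
	h.(prometheus.ExemplarObserver).ObserveWithExemplar(0.42, prometheus.Labels{
		"trace_id": "abc123",
	})
}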

View File

@ -62,6 +62,8 @@ func (r *responseWriterDelegator) WriteHeader(code int) {
}
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
@ -82,12 +84,19 @@ func (d closeNotifierDelegator) CloseNotify() <-chan bool {
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d flusherDelegator) Flush() {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}
d.ResponseWriter.(http.Flusher).Flush()
}
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}

View File

@ -144,7 +144,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
}
}
contentType := expfmt.Negotiate(req.Header)
var contentType expfmt.Format
if opts.EnableOpenMetrics {
contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
} else {
contentType = expfmt.Negotiate(req.Header)
}
header := rsp.Header()
header.Set(contentTypeHeader, string(contentType))
@ -163,22 +168,38 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
enc := expfmt.NewEncoder(w, contentType)
var lastErr error
// handleError handles the error according to opts.ErrorHandling
// and returns true if we have to abort after the handling.
handleError := func(err error) bool {
if err == nil {
return false
}
lastErr = err
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding and sending metric family:", err)
}
errCnt.WithLabelValues("encoding").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case HTTPErrorOnError:
httpError(rsp, err)
return true
}
// Do nothing in all other cases, including ContinueOnError.
return false
}
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding and sending metric family:", err)
}
errCnt.WithLabelValues("encoding").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
// Handled later.
case HTTPErrorOnError:
httpError(rsp, err)
return
}
if handleError(enc.Encode(mf)) {
return
}
}
if closer, ok := enc.(expfmt.Closer); ok {
// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
if handleError(closer.Close()) {
return
}
}
@ -318,6 +339,16 @@ type HandlerOpts struct {
// away). Until the implementation is improved, it is recommended to
// implement a separate timeout in potentially slow Collectors.
Timeout time.Duration
// If true, the experimental OpenMetrics encoding is added to the
// possible options during content negotiation. Note that Prometheus
// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
// the only way to transmit exemplars. However, the move to OpenMetrics
// is not completely transparent. Most notably, the values of "quantile"
// labels of Summaries and "le" labels of Histograms are formatted with
// a trailing ".0" if they would otherwise look like integer numbers
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
}
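
Opting in looks roughly like this; DefaultGatherer stands in for whatever Gatherer the caller actually uses:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	http.Handle("/metrics", promhttp.HandlerFor(
		prometheus.DefaultGatherer,
		promhttp.HandlerOpts{
			// Opt in to OpenMetrics negotiation; required for exemplars.
			EnableOpenMetrics: true,
		},
	))
	http.ListenAndServe(":8080", nil)
}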
// gzipAccepted returns whether the client will accept gzip-encoded content.

View File

@ -0,0 +1,309 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package push provides functions to push metrics to a Pushgateway. It uses a
// builder approach. Create a Pusher with New and then add the various options
// by using its methods, finally calling Add or Push, like this:
//
// // Easy case:
// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push()
//
// // Complex case:
// push.New("http://example.org/metrics", "my_job").
// Collector(myCollector1).
// Collector(myCollector2).
// Grouping("zone", "xy").
// Client(&myHTTPClient).
// BasicAuth("top", "secret").
// Add()
//
// See the examples section for more detailed examples.
//
// See the documentation of the Pushgateway to understand the meaning of
// the grouping key and the differences between Push and Add:
// https://github.com/prometheus/pushgateway
package push
import (
"bytes"
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/prometheus/client_golang/prometheus"
)
const (
contentTypeHeader = "Content-Type"
// base64Suffix is appended to a label name in the request URL path to
// mark the following label value as base64 encoded.
base64Suffix = "@base64"
)
// HTTPDoer is an interface for the one method of http.Client that is used by Pusher
type HTTPDoer interface {
Do(*http.Request) (*http.Response, error)
}
// Pusher manages a push to the Pushgateway. Use New to create one, configure it
// with its methods, and finally use the Add or Push method to push.
type Pusher struct {
error error
url, job string
grouping map[string]string
gatherers prometheus.Gatherers
registerer prometheus.Registerer
client HTTPDoer
useBasicAuth bool
username, password string
expfmt expfmt.Format
}
// New creates a new Pusher to push to the provided URL with the provided job
// name. You can use just host:port or ip:port as url, in which case “http://”
// is added automatically. Alternatively, include the scheme in the URL.
// URL. However, do not include the “/metrics/jobs/…” part.
func New(url, job string) *Pusher {
var (
reg = prometheus.NewRegistry()
err error
)
if !strings.Contains(url, "://") {
url = "http://" + url
}
if strings.HasSuffix(url, "/") {
url = url[:len(url)-1]
}
return &Pusher{
error: err,
url: url,
job: job,
grouping: map[string]string{},
gatherers: prometheus.Gatherers{reg},
registerer: reg,
client: &http.Client{},
expfmt: expfmt.FmtProtoDelim,
}
}
// Push collects/gathers all metrics from all Collectors and Gatherers added to
// this Pusher. Then, it pushes them to the Pushgateway configured while
// creating this Pusher, using the configured job name and any added grouping
// labels as grouping key. All previously pushed metrics with the same job and
// other grouping labels will be replaced with the metrics pushed by this
// call. (It uses HTTP method “PUT” to push to the Pushgateway.)
//
// Push returns the first error encountered by any method call (including this
// one) in the lifetime of the Pusher.
func (p *Pusher) Push() error {
return p.push(http.MethodPut)
}
// Add works like Push, but only previously pushed metrics with the same name
// (and the same job and other grouping labels) will be replaced. (It uses HTTP
// method “POST” to push to the Pushgateway.)
func (p *Pusher) Add() error {
return p.push(http.MethodPost)
}
// Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered
// to push them to the Pushgateway. The gathered metrics must not contain a job
// label of their own.
//
// For convenience, this method returns a pointer to the Pusher itself.
func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher {
p.gatherers = append(p.gatherers, g)
return p
}
// Collector adds a Collector to the Pusher, from which metrics will be
// collected to push them to the Pushgateway. The collected metrics must not
// contain a job label of their own.
//
// For convenience, this method returns a pointer to the Pusher itself.
func (p *Pusher) Collector(c prometheus.Collector) *Pusher {
if p.error == nil {
p.error = p.registerer.Register(c)
}
return p
}
// Grouping adds a label pair to the grouping key of the Pusher, replacing any
// previously added label pair with the same label name. Note that setting any
// labels in the grouping key that are already contained in the metrics to push
// will lead to an error.
//
// For convenience, this method returns a pointer to the Pusher itself.
func (p *Pusher) Grouping(name, value string) *Pusher {
if p.error == nil {
if !model.LabelName(name).IsValid() {
p.error = fmt.Errorf("grouping label has invalid name: %s", name)
return p
}
p.grouping[name] = value
}
return p
}
// Client sets a custom HTTP client for the Pusher. For convenience, this method
// returns a pointer to the Pusher itself.
// Pusher only needs one method of the custom HTTP client: Do(*http.Request).
// Thus, rather than requiring a fully fledged http.Client,
// the provided client only needs to implement the HTTPDoer interface.
// Since *http.Client naturally implements that interface, it can still be used normally.
func (p *Pusher) Client(c HTTPDoer) *Pusher {
p.client = c
return p
}
// BasicAuth configures the Pusher to use HTTP Basic Authentication with the
// provided username and password. For convenience, this method returns a
// pointer to the Pusher itself.
func (p *Pusher) BasicAuth(username, password string) *Pusher {
p.useBasicAuth = true
p.username = username
p.password = password
return p
}
// Format configures the Pusher to use an encoding format given by the
// provided expfmt.Format. The default format is expfmt.FmtProtoDelim and
// should be used with the standard Prometheus Pushgateway. Custom
// implementations may require different formats. For convenience, this
// method returns a pointer to the Pusher itself.
func (p *Pusher) Format(format expfmt.Format) *Pusher {
p.expfmt = format
return p
}
// Delete sends a “DELETE” request to the Pushgateway configured while creating
// this Pusher, using the configured job name and any added grouping labels as
// grouping key. Any added Gatherers and Collectors added to this Pusher are
// ignored by this method.
//
// Delete returns the first error encountered by any method call (including this
// one) in the lifetime of the Pusher.
func (p *Pusher) Delete() error {
if p.error != nil {
return p.error
}
req, err := http.NewRequest(http.MethodDelete, p.fullURL(), nil)
if err != nil {
return err
}
if p.useBasicAuth {
req.SetBasicAuth(p.username, p.password)
}
resp, err := p.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusAccepted {
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
return fmt.Errorf("unexpected status code %d while deleting %s: %s", resp.StatusCode, p.fullURL(), body)
}
return nil
}
func (p *Pusher) push(method string) error {
if p.error != nil {
return p.error
}
mfs, err := p.gatherers.Gather()
if err != nil {
return err
}
buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, p.expfmt)
// Check for pre-existing grouping labels:
for _, mf := range mfs {
for _, m := range mf.GetMetric() {
for _, l := range m.GetLabel() {
if l.GetName() == "job" {
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
}
if _, ok := p.grouping[l.GetName()]; ok {
return fmt.Errorf(
"pushed metric %s (%s) already contains grouping label %s",
mf.GetName(), m, l.GetName(),
)
}
}
}
enc.Encode(mf)
}
req, err := http.NewRequest(method, p.fullURL(), buf)
if err != nil {
return err
}
if p.useBasicAuth {
req.SetBasicAuth(p.username, p.password)
}
req.Header.Set(contentTypeHeader, string(p.expfmt))
resp, err := p.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
// Pushgateway 0.10+ responds with StatusOK, earlier versions with StatusAccepted.
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, p.fullURL(), body)
}
return nil
}
// fullURL assembles the URL used to push/delete metrics and returns it as a
// string. The job name and any grouping label values containing a '/' will
// trigger a base64 encoding of the affected component and proper suffixing of
// the preceding component. If the component does not contain a '/' but other
// special characters, the usual url.QueryEscape is used for compatibility with
// older versions of the Pushgateway and for better readability.
func (p *Pusher) fullURL() string {
urlComponents := []string{}
if encodedJob, base64 := encodeComponent(p.job); base64 {
urlComponents = append(urlComponents, "job"+base64Suffix, encodedJob)
} else {
urlComponents = append(urlComponents, "job", encodedJob)
}
for ln, lv := range p.grouping {
if encodedLV, base64 := encodeComponent(lv); base64 {
urlComponents = append(urlComponents, ln+base64Suffix, encodedLV)
} else {
urlComponents = append(urlComponents, ln, encodedLV)
}
}
return fmt.Sprintf("%s/metrics/%s", p.url, strings.Join(urlComponents, "/"))
}
// encodeComponent encodes the provided string with base64.RawURLEncoding in
// case it contains '/'. If not, it uses url.QueryEscape instead. It returns
// true in the former case.
func encodeComponent(s string) (string, bool) {
if strings.Contains(s, "/") {
return base64.RawURLEncoding.EncodeToString([]byte(s)), true
}
return url.QueryEscape(s), false
}
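
To make the encoding rule concrete, a grouping value containing '/' selects the @base64 form; the URL and job name below are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus/push"

func main() {
	// A '/' in the grouping value forces base64 path encoding, so the
	// resulting path contains .../path@base64/<RawURLEncoding of "/api/v1">.
	p := push.New("http://pushgateway:9091", "my_job").
		Grouping("path", "/api/v1")
	_ = p // call p.Push() or p.Add() to actually send metrics
}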

View File

@ -25,6 +25,7 @@ import (
"sync"
"unicode/utf8"
"github.com/cespare/xxhash/v2"
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/expfmt"
@ -74,7 +75,7 @@ func NewRegistry() *Registry {
// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
// actually been registered with the registry. Unchecked Collectors (those whose
// Describe methed does not yield any descriptors) are excluded from the check.
// Describe method does not yield any descriptors) are excluded from the check.
//
// Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with
@ -266,7 +267,7 @@ func (r *Registry) Register(c Collector) error {
descChan = make(chan *Desc, capDescChan)
newDescIDs = map[uint64]struct{}{}
newDimHashesByName = map[string]uint64{}
collectorID uint64 // Just a sum of all desc IDs.
collectorID uint64 // All desc IDs XOR'd together.
duplicateDescErr error
)
go func() {
@ -293,12 +294,12 @@ func (r *Registry) Register(c Collector) error {
if _, exists := r.descIDs[desc.id]; exists {
duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
}
// If it is not a duplicate desc in this collector, add it to
// If it is not a duplicate desc in this collector, XOR it to
// the collectorID. (We allow duplicate descs within the same
// collector, but their existence must be a no-op.)
if _, exists := newDescIDs[desc.id]; !exists {
newDescIDs[desc.id] = struct{}{}
collectorID += desc.id
collectorID ^= desc.id
}
// Are all the label names and the help string consistent with
@ -360,7 +361,7 @@ func (r *Registry) Unregister(c Collector) bool {
var (
descChan = make(chan *Desc, capDescChan)
descIDs = map[uint64]struct{}{}
collectorID uint64 // Just a sum of the desc IDs.
collectorID uint64 // All desc IDs XOR'd together.
)
go func() {
c.Describe(descChan)
@ -368,7 +369,7 @@ func (r *Registry) Unregister(c Collector) bool {
}()
for desc := range descChan {
if _, exists := descIDs[desc.id]; !exists {
collectorID += desc.id
collectorID ^= desc.id
descIDs[desc.id] = struct{}{}
}
}
@ -875,9 +876,9 @@ func checkMetricConsistency(
}
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
h := hashNew()
h = hashAdd(h, name)
h = hashAddByte(h, separatorByte)
h := xxhash.New()
h.WriteString(name)
h.Write(separatorByteSlice)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
@ -888,18 +889,19 @@ func checkMetricConsistency(
dtoMetric.Label = copiedLabels
}
for _, lp := range dtoMetric.Label {
h = hashAdd(h, lp.GetName())
h = hashAddByte(h, separatorByte)
h = hashAdd(h, lp.GetValue())
h = hashAddByte(h, separatorByte)
h.WriteString(lp.GetName())
h.Write(separatorByteSlice)
h.WriteString(lp.GetValue())
h.Write(separatorByteSlice)
}
if _, exists := metricHashes[h]; exists {
hSum := h.Sum64()
if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf(
"collected metric %q { %s} was collected before with the same name and label values",
name, dtoMetric,
)
}
metricHashes[h] = struct{}{}
metricHashes[hSum] = struct{}{}
return nil
}

View File

@ -208,7 +208,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
s := &noObjectivesSummary{
desc: desc,
labelPairs: makeLabelPairs(desc, labelValues),
counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
counts: [2]*summaryCounts{{}, {}},
}
s.init(s) // Init self-collection.
return s

View File

@ -16,8 +16,11 @@ package prometheus
import (
"fmt"
"sort"
"time"
"unicode/utf8"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
dto "github.com/prometheus/client_model/go"
)
@ -69,7 +72,7 @@ func (v *valueFunc) Desc() *Desc {
}
func (v *valueFunc) Write(out *dto.Metric) error {
return populateMetric(v.valType, v.function(), v.labelPairs, out)
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
}
// NewConstMetric returns a metric with one fixed value that cannot be
@ -116,19 +119,20 @@ func (m *constMetric) Desc() *Desc {
}
func (m *constMetric) Write(out *dto.Metric) error {
return populateMetric(m.valType, m.val, m.labelPairs, out)
return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
}
func populateMetric(
t ValueType,
v float64,
labelPairs []*dto.LabelPair,
e *dto.Exemplar,
m *dto.Metric,
) error {
m.Label = labelPairs
switch t {
case CounterValue:
m.Counter = &dto.Counter{Value: proto.Float64(v)}
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue:
@ -160,3 +164,40 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
sort.Sort(labelPairSorter(labelPairs))
return labelPairs
}
// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
const ExemplarMaxRunes = 64
// newExemplar creates a new dto.Exemplar from the provided values. An error is
// returned if any of the label names or values are invalid or if the total
// number of runes in the label names and values exceeds ExemplarMaxRunes.
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
e := &dto.Exemplar{}
e.Value = proto.Float64(value)
tsProto, err := ptypes.TimestampProto(ts)
if err != nil {
return nil, err
}
e.Timestamp = tsProto
labelPairs := make([]*dto.LabelPair, 0, len(l))
var runes int
for name, value := range l {
if !checkLabelName(name) {
return nil, fmt.Errorf("exemplar label name %q is invalid", name)
}
runes += utf8.RuneCountInString(name)
if !utf8.ValidString(value) {
return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
}
runes += utf8.RuneCountInString(value)
labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(name),
Value: proto.String(value),
})
}
if runes > ExemplarMaxRunes {
return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
}
e.Label = labelPairs
return e, nil
}

View File

@ -24,7 +24,7 @@ import (
// their label values. metricVec is not used directly (and therefore
// unexported). It is used as a building block for implementations of vectors of
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
// It also handles label currying. It uses basicMetricVec internally.
// It also handles label currying.
type metricVec struct {
*metricMap

View File

@ -1,11 +1,14 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: metrics.proto
package io_prometheus_client // import "github.com/prometheus/client_model/go"
package io_prometheus_client
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -16,7 +19,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type MetricType int32
@ -35,6 +38,7 @@ var MetricType_name = map[int32]string{
3: "UNTYPED",
4: "HISTOGRAM",
}
var MetricType_value = map[string]int32{
"COUNTER": 0,
"GAUGE": 1,
@ -48,9 +52,11 @@ func (x MetricType) Enum() *MetricType {
*p = x
return p
}
func (x MetricType) String() string {
return proto.EnumName(MetricType_name, int32(x))
}
func (x *MetricType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
if err != nil {
@ -59,8 +65,9 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
*x = MetricType(value)
return nil
}
func (MetricType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
return fileDescriptor_6039342a2ba47b72, []int{0}
}
type LabelPair struct {
@ -75,16 +82,17 @@ func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
return fileDescriptor_6039342a2ba47b72, []int{0}
}
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LabelPair.Unmarshal(m, b)
}
func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
}
func (dst *LabelPair) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelPair.Merge(dst, src)
func (m *LabelPair) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelPair.Merge(m, src)
}
func (m *LabelPair) XXX_Size() int {
return xxx_messageInfo_LabelPair.Size(m)
@ -120,16 +128,17 @@ func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
func (*Gauge) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
return fileDescriptor_6039342a2ba47b72, []int{1}
}
func (m *Gauge) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Gauge.Unmarshal(m, b)
}
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
}
func (dst *Gauge) XXX_Merge(src proto.Message) {
xxx_messageInfo_Gauge.Merge(dst, src)
func (m *Gauge) XXX_Merge(src proto.Message) {
xxx_messageInfo_Gauge.Merge(m, src)
}
func (m *Gauge) XXX_Size() int {
return xxx_messageInfo_Gauge.Size(m)
@ -148,26 +157,28 @@ func (m *Gauge) GetValue() float64 {
}
type Counter struct {
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Counter) Reset() { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage() {}
func (*Counter) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
return fileDescriptor_6039342a2ba47b72, []int{2}
}
func (m *Counter) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Counter.Unmarshal(m, b)
}
func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
}
func (dst *Counter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Counter.Merge(dst, src)
func (m *Counter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Counter.Merge(m, src)
}
func (m *Counter) XXX_Size() int {
return xxx_messageInfo_Counter.Size(m)
@ -185,6 +196,13 @@ func (m *Counter) GetValue() float64 {
return 0
}
func (m *Counter) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
type Quantile struct {
Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
@ -197,16 +215,17 @@ func (m *Quantile) Reset() { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage() {}
func (*Quantile) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
return fileDescriptor_6039342a2ba47b72, []int{3}
}
func (m *Quantile) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Quantile.Unmarshal(m, b)
}
func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
}
func (dst *Quantile) XXX_Merge(src proto.Message) {
xxx_messageInfo_Quantile.Merge(dst, src)
func (m *Quantile) XXX_Merge(src proto.Message) {
xxx_messageInfo_Quantile.Merge(m, src)
}
func (m *Quantile) XXX_Size() int {
return xxx_messageInfo_Quantile.Size(m)
@ -244,16 +263,17 @@ func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
func (*Summary) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
return fileDescriptor_6039342a2ba47b72, []int{4}
}
func (m *Summary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Summary.Unmarshal(m, b)
}
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
}
func (dst *Summary) XXX_Merge(src proto.Message) {
xxx_messageInfo_Summary.Merge(dst, src)
func (m *Summary) XXX_Merge(src proto.Message) {
xxx_messageInfo_Summary.Merge(m, src)
}
func (m *Summary) XXX_Size() int {
return xxx_messageInfo_Summary.Size(m)
@ -296,16 +316,17 @@ func (m *Untyped) Reset() { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage() {}
func (*Untyped) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
return fileDescriptor_6039342a2ba47b72, []int{5}
}
func (m *Untyped) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Untyped.Unmarshal(m, b)
}
func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
}
func (dst *Untyped) XXX_Merge(src proto.Message) {
xxx_messageInfo_Untyped.Merge(dst, src)
func (m *Untyped) XXX_Merge(src proto.Message) {
xxx_messageInfo_Untyped.Merge(m, src)
}
func (m *Untyped) XXX_Size() int {
return xxx_messageInfo_Untyped.Size(m)
@ -336,16 +357,17 @@ func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
return fileDescriptor_6039342a2ba47b72, []int{6}
}
func (m *Histogram) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Histogram.Unmarshal(m, b)
}
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
}
func (dst *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(dst, src)
func (m *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(m, src)
}
func (m *Histogram) XXX_Size() int {
return xxx_messageInfo_Histogram.Size(m)
@ -378,27 +400,29 @@ func (m *Histogram) GetBucket() []*Bucket {
}
type Bucket struct {
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
return fileDescriptor_6039342a2ba47b72, []int{7}
}
func (m *Bucket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Bucket.Unmarshal(m, b)
}
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
}
func (dst *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(dst, src)
func (m *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(m, src)
}
func (m *Bucket) XXX_Size() int {
return xxx_messageInfo_Bucket.Size(m)
@ -423,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 {
return 0
}
func (m *Bucket) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
type Exemplar struct {
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
return fileDescriptor_6039342a2ba47b72, []int{8}
}
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Exemplar.Unmarshal(m, b)
}
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
}
func (m *Exemplar) XXX_Merge(src proto.Message) {
xxx_messageInfo_Exemplar.Merge(m, src)
}
func (m *Exemplar) XXX_Size() int {
return xxx_messageInfo_Exemplar.Size(m)
}
func (m *Exemplar) XXX_DiscardUnknown() {
xxx_messageInfo_Exemplar.DiscardUnknown(m)
}
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
func (m *Exemplar) GetLabel() []*LabelPair {
if m != nil {
return m.Label
}
return nil
}
func (m *Exemplar) GetValue() float64 {
if m != nil && m.Value != nil {
return *m.Value
}
return 0
}
func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
type Metric struct {
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
@ -440,16 +526,17 @@ func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
return fileDescriptor_6039342a2ba47b72, []int{9}
}
func (m *Metric) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Metric.Unmarshal(m, b)
}
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
}
func (dst *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(dst, src)
func (m *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(m, src)
}
func (m *Metric) XXX_Size() int {
return xxx_messageInfo_Metric.Size(m)
@ -523,16 +610,17 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} }
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
func (*MetricFamily) ProtoMessage() {}
func (*MetricFamily) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
return fileDescriptor_6039342a2ba47b72, []int{10}
}
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
}
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
}
func (dst *MetricFamily) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricFamily.Merge(dst, src)
func (m *MetricFamily) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricFamily.Merge(m, src)
}
func (m *MetricFamily) XXX_Size() int {
return xxx_messageInfo_MetricFamily.Size(m)
@ -572,6 +660,7 @@ func (m *MetricFamily) GetMetric() []*Metric {
}
func init() {
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
@ -580,50 +669,55 @@ func init() {
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
}
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
// 591 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
var fileDescriptor_6039342a2ba47b72 = []byte{
// 665 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
}


@ -30,17 +30,38 @@ type Encoder interface {
Encode(*dto.MetricFamily) error
}
type encoder func(*dto.MetricFamily) error
func (e encoder) Encode(v *dto.MetricFamily) error {
return e(v)
// Closer is implemented by Encoders that need to be closed to finalize
// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
//
// Note that all Encoder implementations returned from this package implement
// Closer, too, even if the Close call is a no-op. This happens in preparation
// for adding a Close method to the Encoder interface directly in a (mildly
// breaking) release in the future.
type Closer interface {
Close() error
}
// Negotiate returns the Content-Type based on the given Accept header.
// If no appropriate accepted type is found, FmtText is returned.
type encoderCloser struct {
encode func(*dto.MetricFamily) error
close func() error
}
func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
return ec.encode(v)
}
func (ec encoderCloser) Close() error {
return ec.close()
}
// Negotiate returns the Content-Type based on the given Accept header. If no
// appropriate accepted type is found, FmtText is returned (which is the
// Prometheus text format). This function will never negotiate FmtOpenMetrics,
// as the support is still experimental. To include the option to negotiate
// FmtOpenMetrics, use NegotiateOpenMetrics.
func Negotiate(h http.Header) Format {
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
// Check for protocol buffer
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
@ -51,8 +72,6 @@ func Negotiate(h http.Header) Format {
return FmtProtoCompact
}
}
// Check for text format.
ver := ac.Params["version"]
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
}
@ -60,29 +79,84 @@ func Negotiate(h http.Header) Format {
return FmtText
}
// NewEncoder returns a new encoder based on content type negotiation.
// NegotiateIncludingOpenMetrics works like Negotiate but includes
// FmtOpenMetrics as an option for the result. Note that this function is
// temporary and will disappear once FmtOpenMetrics is fully supported and as
// such may be negotiated by the normal Negotiate function.
func NegotiateIncludingOpenMetrics(h http.Header) Format {
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
return FmtProtoDelim
case "text":
return FmtProtoText
case "compact-text":
return FmtProtoCompact
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
return FmtOpenMetrics
}
}
return FmtText
}
// NewEncoder returns a new encoder based on content type negotiation. All
// Encoder implementations returned by NewEncoder also implement Closer, and
// callers should always call the Close method. It is currently only required
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
func NewEncoder(w io.Writer, format Format) Encoder {
switch format {
case FmtProtoDelim:
return encoder(func(v *dto.MetricFamily) error {
_, err := pbutil.WriteDelimited(w, v)
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := pbutil.WriteDelimited(w, v)
return err
},
close: func() error { return nil },
}
case FmtProtoCompact:
return encoder(func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, v.String())
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, v.String())
return err
},
close: func() error { return nil },
}
case FmtProtoText:
return encoder(func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
return err
},
close: func() error { return nil },
}
case FmtText:
return encoder(func(v *dto.MetricFamily) error {
_, err := MetricFamilyToText(w, v)
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToText(w, v)
return err
},
close: func() error { return nil },
}
case FmtOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, v)
return err
},
close: func() error {
_, err := FinalizeOpenMetrics(w)
return err
},
}
}
panic("expfmt.NewEncoder: unknown format")
panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
}
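Taken together, the negotiation and encoder APIs above suggest a scrape-handler pattern along these lines. This is a minimal sketch, not part of this diff; the handler and registry wiring are assumptions:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func metricsHandler(reg *prometheus.Registry) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mfs, err := reg.Gather()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// NegotiateIncludingOpenMetrics may return FmtOpenMetrics;
		// plain Negotiate never does.
		format := expfmt.NegotiateIncludingOpenMetrics(r.Header)
		w.Header().Set("Content-Type", string(format))
		enc := expfmt.NewEncoder(w, format)
		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				return
			}
		}
		// Every encoder returned by NewEncoder also implements Closer;
		// for FmtOpenMetrics, Close writes the required `# EOF` line.
		if closer, ok := enc.(expfmt.Closer); ok {
			closer.Close()
		}
	})
}

func main() {
	reg := prometheus.NewRegistry()
	http.Handle("/metrics", metricsHandler(reg))
	http.ListenAndServe(":8080", nil)
}
```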


@ -19,10 +19,12 @@ type Format string
// Constants to assemble the Content-Type values for the different wire protocols.
const (
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion = "0.0.1"
// The Content-Type values for the different wire protocols.
FmtUnknown Format = `<unknown>`
@ -30,6 +32,7 @@ const (
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
)
const (


@ -0,0 +1,527 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expfmt
import (
"bufio"
"bytes"
"fmt"
"io"
"math"
"strconv"
"strings"
"github.com/golang/protobuf/ptypes"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
// the number of bytes written and any error encountered. The output will have
// the same order as the input, no further sorting is performed. Furthermore,
// this function assumes the input is already sanitized and does not perform any
// sanity checks. If the input contains duplicate metrics or invalid metric or
// label names, the conversion will result in invalid text format output.
//
// This function fulfills the type 'expfmt.encoder'.
//
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
// on individual metric families, it is the responsibility of the caller to
// append this line to 'out' once all metric families have been written.
// Conveniently, this can be done by calling FinalizeOpenMetrics.
//
// The output should be fully OpenMetrics compliant. However, there are a few
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
// - Counters are expected to have the `_total` suffix in their metric name. In
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
// line. A counter with a missing `_total` suffix is not an error. However,
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
// output.
//
// - No support for the following (optional) features: `# UNIT` line, `_created`
// line, info type, stateset type, gaugehistogram type.
//
// - The size of exemplar labels is not checked (i.e. it's possible to create
// exemplars that are larger than allowed by the OpenMetrics specification).
//
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
// with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
name := in.GetName()
if name == "" {
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
}
// Try the interface upgrade. If it doesn't work, we'll use a
// bufio.Writer from the sync.Pool.
w, ok := out.(enhancedWriter)
if !ok {
b := bufPool.Get().(*bufio.Writer)
b.Reset(out)
w = b
defer func() {
bErr := b.Flush()
if err == nil {
err = bErr
}
bufPool.Put(b)
}()
}
var (
n int
metricType = in.GetType()
shortName = name
)
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
shortName = name[:len(name)-6]
}
// Comments, first HELP, then TYPE.
if in.Help != nil {
n, err = w.WriteString("# HELP ")
written += n
if err != nil {
return
}
n, err = w.WriteString(shortName)
written += n
if err != nil {
return
}
err = w.WriteByte(' ')
written++
if err != nil {
return
}
n, err = writeEscapedString(w, *in.Help, true)
written += n
if err != nil {
return
}
err = w.WriteByte('\n')
written++
if err != nil {
return
}
}
n, err = w.WriteString("# TYPE ")
written += n
if err != nil {
return
}
n, err = w.WriteString(shortName)
written += n
if err != nil {
return
}
switch metricType {
case dto.MetricType_COUNTER:
if strings.HasSuffix(name, "_total") {
n, err = w.WriteString(" counter\n")
} else {
n, err = w.WriteString(" unknown\n")
}
case dto.MetricType_GAUGE:
n, err = w.WriteString(" gauge\n")
case dto.MetricType_SUMMARY:
n, err = w.WriteString(" summary\n")
case dto.MetricType_UNTYPED:
n, err = w.WriteString(" unknown\n")
case dto.MetricType_HISTOGRAM:
n, err = w.WriteString(" histogram\n")
default:
return written, fmt.Errorf("unknown metric type %s", metricType.String())
}
written += n
if err != nil {
return
}
// Finally the samples, one line for each.
for _, metric := range in.Metric {
switch metricType {
case dto.MetricType_COUNTER:
if metric.Counter == nil {
return written, fmt.Errorf(
"expected counter in metric %s %s", name, metric,
)
}
// Note that we have ensured above that either the name
// ends in `_total` or that the rendered type is
// `unknown`. Therefore, no `_total` suffix needs to be added here.
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
metric.Counter.GetValue(), 0, false,
metric.Counter.Exemplar,
)
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
return written, fmt.Errorf(
"expected gauge in metric %s %s", name, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
metric.Gauge.GetValue(), 0, false,
nil,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
return written, fmt.Errorf(
"expected untyped in metric %s %s", name, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
metric.Untyped.GetValue(), 0, false,
nil,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
return written, fmt.Errorf(
"expected summary in metric %s %s", name, metric,
)
}
for _, q := range metric.Summary.Quantile {
n, err = writeOpenMetricsSample(
w, name, "", metric,
model.QuantileLabel, q.GetQuantile(),
q.GetValue(), 0, false,
nil,
)
written += n
if err != nil {
return
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(), 0, false,
nil,
)
written += n
if err != nil {
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
0, metric.Summary.GetSampleCount(), true,
nil,
)
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
return written, fmt.Errorf(
"expected histogram in metric %s %s", name, metric,
)
}
infSeen := false
for _, b := range metric.Histogram.Bucket {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
model.BucketLabel, b.GetUpperBound(),
0, b.GetCumulativeCount(), true,
b.Exemplar,
)
written += n
if err != nil {
return
}
if math.IsInf(b.GetUpperBound(), +1) {
infSeen = true
}
}
if !infSeen {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
model.BucketLabel, math.Inf(+1),
0, metric.Histogram.GetSampleCount(), true,
nil,
)
written += n
if err != nil {
return
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(), 0, false,
nil,
)
written += n
if err != nil {
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
0, metric.Histogram.GetSampleCount(), true,
nil,
)
default:
return written, fmt.Errorf(
"unexpected type in metric %s %s", name, metric,
)
}
written += n
if err != nil {
return
}
}
return
}
// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
return w.Write([]byte("# EOF\n"))
}
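A sketch of how the two exported functions above combine in caller code (the helper name is hypothetical, not part of this file):

```go
// renderOpenMetrics encodes all metric families into one complete
// OpenMetrics document, terminated by the mandatory `# EOF` line.
func renderOpenMetrics(mfs []*dto.MetricFamily) ([]byte, error) {
	var buf bytes.Buffer
	for _, mf := range mfs {
		if _, err := MetricFamilyToOpenMetrics(&buf, mf); err != nil {
			return nil, err
		}
	}
	if _, err := FinalizeOpenMetrics(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```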
// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
// w, given the metric name, the metric proto message itself, optionally an
// additional label name with a float64 value (use empty string as label name if
// not required), the value (either as float64 or uint64, determined by
// useIntValue), and optionally an exemplar (use nil if not required). The
// function returns the number of bytes written and any error encountered.
func writeOpenMetricsSample(
w enhancedWriter,
name, suffix string,
metric *dto.Metric,
additionalLabelName string, additionalLabelValue float64,
floatValue float64, intValue uint64, useIntValue bool,
exemplar *dto.Exemplar,
) (int, error) {
var written int
n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
if suffix != "" {
n, err = w.WriteString(suffix)
written += n
if err != nil {
return written, err
}
}
n, err = writeOpenMetricsLabelPairs(
w, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
if useIntValue {
n, err = writeUint(w, intValue)
} else {
n, err = writeOpenMetricsFloat(w, floatValue)
}
written += n
if err != nil {
return written, err
}
if metric.TimestampMs != nil {
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
// TODO(beorn7): Format this directly without converting to a float first.
n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
written += n
if err != nil {
return written, err
}
}
if exemplar != nil {
n, err = writeExemplar(w, exemplar)
written += n
if err != nil {
return written, err
}
}
err = w.WriteByte('\n')
written++
if err != nil {
return written, err
}
return written, nil
}
// writeOpenMetricsLabelPairs works like writeLabelPairs but formats the float
// in OpenMetrics style.
func writeOpenMetricsLabelPairs(
w enhancedWriter,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
var (
written int
separator byte = '{'
)
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(lp.GetName())
written += n
if err != nil {
return written, err
}
n, err = w.WriteString(`="`)
written += n
if err != nil {
return written, err
}
n, err = writeEscapedString(w, lp.GetValue(), true)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
separator = ','
}
if additionalLabelName != "" {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(additionalLabelName)
written += n
if err != nil {
return written, err
}
n, err = w.WriteString(`="`)
written += n
if err != nil {
return written, err
}
n, err = writeOpenMetricsFloat(w, additionalLabelValue)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
}
err := w.WriteByte('}')
written++
if err != nil {
return written, err
}
return written, nil
}
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
// function returns the number of bytes written and any error encountered.
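// For example (illustrative values), an exemplar with a trace_id label,
// value 42, and a timestamp is appended to its sample line as:
//
//	# {trace_id="abc123"} 42.0 1.5876352e+09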
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
written := 0
n, err := w.WriteString(" # ")
written += n
if err != nil {
return written, err
}
n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
written += n
if err != nil {
return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
n, err = writeOpenMetricsFloat(w, e.GetValue())
written += n
if err != nil {
return written, err
}
if e.Timestamp != nil {
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
ts, err := ptypes.Timestamp(e.Timestamp)
if err != nil {
return written, err
}
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
written += n
if err != nil {
return written, err
}
}
return written, nil
}
// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
// number would otherwise contain neither a "." nor an "e".
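// For example, 1 is written as "1.0" and 0.35 as "0.35", while 1e20
// already contains an "e" and is written as "1e+20" unchanged.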
func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
switch {
case f == 1:
return w.WriteString("1.0")
case f == 0:
return w.WriteString("0.0")
case f == -1:
return w.WriteString("-1.0")
case math.IsNaN(f):
return w.WriteString("NaN")
case math.IsInf(f, +1):
return w.WriteString("+Inf")
case math.IsInf(f, -1):
return w.WriteString("-Inf")
default:
bp := numBufPool.Get().(*[]byte)
*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
if !bytes.ContainsAny(*bp, "e.") {
*bp = append(*bp, '.', '0')
}
written, err := w.Write(*bp)
numBufPool.Put(bp)
return written, err
}
}
// writeUint is like writeInt, just for uint64.
func writeUint(w enhancedWriter, u uint64) (int, error) {
bp := numBufPool.Get().(*[]byte)
*bp = strconv.AppendUint((*bp)[:0], u, 10)
written, err := w.Write(*bp)
numBufPool.Put(bp)
return written, err
}


@ -14,9 +14,10 @@
package expfmt
import (
"bytes"
"bufio"
"fmt"
"io"
"io/ioutil"
"math"
"strconv"
"strings"
@ -27,7 +28,7 @@ import (
dto "github.com/prometheus/client_model/go"
)
// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
// implements it.
type enhancedWriter interface {
io.Writer
@ -37,14 +38,13 @@ type enhancedWriter interface {
}
const (
initialBufSize = 512
initialNumBufSize = 24
)
var (
bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(make([]byte, 0, initialBufSize))
return bufio.NewWriter(ioutil.Discard)
},
}
numBufPool = sync.Pool{
@ -75,16 +75,14 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
}
// Try the interface upgrade. If it doesn't work, we'll use a
// bytes.Buffer from the sync.Pool and write out its content to out in a
// single go in the end.
// bufio.Writer from the sync.Pool.
w, ok := out.(enhancedWriter)
if !ok {
b := bufPool.Get().(*bytes.Buffer)
b.Reset()
b := bufPool.Get().(*bufio.Writer)
b.Reset(out)
w = b
defer func() {
bWritten, bErr := out.Write(b.Bytes())
written = bWritten
bErr := b.Flush()
if err == nil {
err = bErr
}
@ -425,9 +423,8 @@ var (
func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
if includeDoubleQuote {
return quotedEscaper.WriteString(w, v)
} else {
return escaper.WriteString(w, v)
}
return escaper.WriteString(w, v)
}
// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes


@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn {
// - Other labels have to be added to currentLabels for signature calculation.
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
if p.currentLabelPair.GetName() == model.QuantileLabel {
if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
return nil
@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn {
// Similar special treatment of histograms.
if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
if p.currentLabelPair.GetName() == model.BucketLabel {
if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
return nil
@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn {
if p.readTokenUntilWhitespace(); p.err != nil {
return nil // Unexpected end of input.
}
value, err := strconv.ParseFloat(p.currentToken.String(), 64)
value, err := parseFloat(p.currentToken.String())
if err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
@ -755,3 +755,10 @@ func histogramMetricName(name string) string {
return name
}
}
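// parseFloat works like strconv.ParseFloat, but rejects 'p', 'P', and '_':
// since Go 1.13, strconv.ParseFloat accepts hexadecimal floats and
// underscore digit separators, neither of which is valid in the
// Prometheus text format.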
func parseFloat(s string) (float64, error) {
if strings.ContainsAny(s, "pP_") {
return 0, fmt.Errorf("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}


@ -1,6 +1,4 @@
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
linters:
enable:
- staticcheck
- govet
disable-all: true


@ -2,17 +2,120 @@
Prometheus uses GitHub to manage reviews of pull requests.
* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute)
* If you have a trivial fix or improvement, go ahead and create a pull request,
addressing (with `@...`) the maintainer of this repository (see
addressing (with `@...`) a suitable maintainer of this repository (see
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
This will avoid unnecessary work and surely give you and us a good deal
of inspiration.
of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
* Relevant coding style guidelines are the [Go Code Review
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
Practices for Production
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
## Steps to Contribute
Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue.
Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on the issue and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community).
For quickly compiling and testing your changes do:
```
make test # Make sure all the tests pass before you commit and push :)
```
We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though; fixing the code to comply with the linter's recommendation is generally the preferred course of action.
## Pull Request Checklist
* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
* Add tests relevant to the fixed bug or new feature.
## Dependency management
The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed.
All dependencies are vendored in the `vendor/` directory.
To add or update a new dependency, use the `go get` command:
```bash
# Pick the latest tagged release.
go get example.com/some/module/pkg
# Pick a specific version.
go get example.com/some/module/pkg@vX.Y.Z
```
Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
```bash
# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
```
You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
## API Implementation Guidelines
### Naming and Documentation
Public functions and structs should normally be named according to the file(s) being read and parsed. For example,
the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function
should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s).
### Reading vs. Parsing
Most functionality in this library consists of reading files and then parsing the text into structured data. In most
cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and
a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested
directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types
such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files
in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead.
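A minimal sketch of that split, using hypothetical `Thing` names (not a real procfs API):

```go
package procfs

import (
	"bufio"
	"bytes"
	"io"
	"strconv"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// Thing models the contents of a hypothetical /proc/thing file.
type Thing struct {
	Values []uint64
}

// Thing reads /proc/thing and delegates parsing to parseThing.
func (fs FS) Thing() (Thing, error) {
	data, err := util.ReadFileNoStat(fs.proc.Path("thing"))
	if err != nil {
		return Thing{}, err
	}
	return parseThing(bytes.NewReader(data))
}

// parseThing parses one value per line, so tests can exercise it with any
// Reader instead of the real filesystem.
func parseThing(r io.Reader) (Thing, error) {
	var t Thing
	s := bufio.NewScanner(r)
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if line == "" {
			continue
		}
		v, err := strconv.ParseUint(line, 10, 64)
		if err != nil {
			return Thing{}, err
		}
		t.Values = append(t.Values, v)
	}
	return t, s.Err()
}
```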
### /proc and /sys filesystem I/O
The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O.
Many of the files are changing continuously and the data being read can in some cases change between subsequent
reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of
the file.
Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
the full file, and then using a scanner on the `[]byte` or `string` containing the data.
```
data, err := util.ReadFileNoStat("/proc/cpuinfo")
if err != nil {
return err
}
reader := bytes.NewReader(data)
scanner := bufio.NewScanner(reader)
```
The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does
not bother to check the size of the file before reading.
```
data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
```


@ -86,6 +86,7 @@ endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKERFILE_PATH ?= ./
DOCKER_REPO ?= prom
DOCKER_ARCHS ?= amd64
@ -212,7 +213,7 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
.
$(DOCKERFILE_PATH)
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
@ -247,7 +248,9 @@ proto:
ifdef GOLANGCI_LINT
$(GOLANGCI_LINT):
mkdir -p $(FIRST_GOPATH)/bin
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
| sed -e '/install -d/d' \
| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif
ifdef GOVENDOR


@ -1,6 +1,6 @@
# procfs
This procfs package provides functions to retrieve system, kernel and process
This package provides functions to retrieve system, kernel, and process
metrics from the pseudo-filesystems /proc and /sys.
*WARNING*: This package is a work in progress. Its API may still break in
@ -13,7 +13,8 @@ backwards-incompatible ways without warnings. Use it at your own risk.
## Usage
The procfs library is organized by packages based on whether the gathered data is coming from
/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from
/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
/sys, or both. For example, cpu statistics are gathered from
`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
point is initialized, and then the stat information is read.
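The initialization example itself falls outside this hunk; the sequence looks roughly like this (a sketch, not the verbatim README text):

```go
fs, err := procfs.NewFS("/proc")
if err != nil {
	// handle error
}
stats, err := fs.Stat()
```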
@ -29,10 +30,17 @@ Some sub-packages such as `blockdevice`, require access to both the proc and sys
stats, err := fs.ProcDiskstats()
```
## Package Organization
The packages in this project are organized according to (1) whether the data comes from the `/proc` or
`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
is available in the `blockdevices` sub-package.
## Building and Testing
The procfs library is normally built as part of another application. However, when making
changes to the library, the `make test` command can be used to run the API test suite.
The procfs library is intended to be built as part of another application, so there are no distributable binaries.
However, most of the API includes unit tests which can be run with `make test`.
### Updating Test Fixtures

vendor/github.com/prometheus/procfs/arp.go generated vendored Normal file

@ -0,0 +1,85 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"io/ioutil"
"net"
"strings"
)
// ARPEntry contains a single row of the columnar data represented in
// /proc/net/arp.
type ARPEntry struct {
// IP address
IPAddr net.IP
// MAC address
HWAddr net.HardwareAddr
// Name of the device
Device string
}
// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
// and then returns a slice of ARPEntry structs.
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
}
func parseARPEntries(data []byte) ([]ARPEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]ARPEntry, 0)
var err error
const (
expectedDataWidth = 6
expectedHeaderWidth = 9
)
for _, line := range lines {
columns := strings.Fields(line)
width := len(columns)
if width == expectedHeaderWidth || width == 0 {
continue
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
}
entries = append(entries, entry)
} else {
return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
}
}
return entries, err
}
func parseARPEntry(columns []string) (ARPEntry, error) {
ip := net.ParseIP(columns[0])
mac := net.HardwareAddr(columns[3])
entry := ARPEntry{
IPAddr: ip,
HWAddr: mac,
Device: columns[5],
}
return entry, nil
}
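A hypothetical caller of the new accessor (illustrative only, not part of the vendored file):

```go
fs, err := procfs.NewFS("/proc")
if err != nil {
	log.Fatal(err)
}
entries, err := fs.GatherARPEntries()
if err != nil {
	log.Fatal(err)
}
for _, e := range entries {
	fmt.Printf("%s %s %s\n", e.IPAddr, e.HWAddr, e.Device)
}
```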


@ -31,7 +31,7 @@ type BuddyInfo struct {
Sizes []float64
}
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.proc.Path("buddyinfo"))
if err != nil {

vendor/github.com/prometheus/procfs/cpuinfo.go generated vendored Normal file

@ -0,0 +1,167 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
type CPUInfo struct {
Processor uint
VendorID string
CPUFamily string
Model string
ModelName string
Stepping string
Microcode string
CPUMHz float64
CacheSize string
PhysicalID string
Siblings uint
CoreID string
CPUCores uint
APICID string
InitialAPICID string
FPU string
FPUException string
CPUIDLevel uint
WP string
Flags []string
Bugs []string
BogoMips float64
CLFlushSize uint
CacheAlignment uint
AddressSizes string
PowerManagement string
}
// CPUInfo returns information about current system CPUs.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) CPUInfo() ([]CPUInfo, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
if err != nil {
return nil, err
}
return parseCPUInfo(data)
}
// parseCPUInfo parses data from /proc/cpuinfo
func parseCPUInfo(info []byte) ([]CPUInfo, error) {
cpuinfo := []CPUInfo{}
i := -1
scanner := bufio.NewScanner(bytes.NewReader(info))
for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) == "" {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "vendor_id":
cpuinfo[i].VendorID = field[1]
case "cpu family":
cpuinfo[i].CPUFamily = field[1]
case "model":
cpuinfo[i].Model = field[1]
case "model name":
cpuinfo[i].ModelName = field[1]
case "stepping":
cpuinfo[i].Stepping = field[1]
case "microcode":
cpuinfo[i].Microcode = field[1]
case "cpu MHz":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
case "cache size":
cpuinfo[i].CacheSize = field[1]
case "physical id":
cpuinfo[i].PhysicalID = field[1]
case "siblings":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Siblings = uint(v)
case "core id":
cpuinfo[i].CoreID = field[1]
case "cpu cores":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUCores = uint(v)
case "apicid":
cpuinfo[i].APICID = field[1]
case "initial apicid":
cpuinfo[i].InitialAPICID = field[1]
case "fpu":
cpuinfo[i].FPU = field[1]
case "fpu_exception":
cpuinfo[i].FPUException = field[1]
case "cpuid level":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUIDLevel = uint(v)
case "wp":
cpuinfo[i].WP = field[1]
case "flags":
cpuinfo[i].Flags = strings.Fields(field[1])
case "bugs":
cpuinfo[i].Bugs = strings.Fields(field[1])
case "bogomips":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
case "clflush size":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CLFlushSize = uint(v)
case "cache_alignment":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CacheAlignment = uint(v)
case "address sizes":
cpuinfo[i].AddressSizes = field[1]
case "power management":
cpuinfo[i].PowerManagement = field[1]
}
}
return cpuinfo, nil
}
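For instance, an illustrative caller (not part of the vendored file) could list the parsed processors:

```go
infos, err := fs.CPUInfo()
if err != nil {
	log.Fatal(err)
}
for _, ci := range infos {
	fmt.Printf("cpu%d: %s @ %.0f MHz\n", ci.Processor, ci.ModelName, ci.CPUMHz)
}
```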

vendor/github.com/prometheus/procfs/crypto.go generated vendored Normal file

@ -0,0 +1,131 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bytes"
"fmt"
"io/ioutil"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Crypto holds info parsed from /proc/crypto.
type Crypto struct {
Alignmask *uint64
Async bool
Blocksize *uint64
Chunksize *uint64
Ctxsize *uint64
Digestsize *uint64
Driver string
Geniv string
Internal string
Ivsize *uint64
Maxauthsize *uint64
MaxKeysize *uint64
MinKeysize *uint64
Module string
Name string
Priority *int64
Refcnt *int64
Seedsize *uint64
Selftest string
Type string
Walksize *uint64
}
// Crypto parses the crypto file (/proc/crypto) and returns a slice of
// structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) {
data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
}
crypto, err := parseCrypto(data)
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
}
return crypto, nil
}
func parseCrypto(cryptoData []byte) ([]Crypto, error) {
crypto := []Crypto{}
cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
for _, block := range cryptoBlocks {
var newCryptoElem Crypto
lines := strings.Split(string(block), "\n")
for _, line := range lines {
if strings.TrimSpace(line) == "" || line[0] == ' ' {
continue
}
fields := strings.Split(line, ":")
if len(fields) < 2 {
// Skip malformed lines without a colon; indexing fields[1] would panic.
continue
}
key := strings.TrimSpace(fields[0])
value := strings.TrimSpace(fields[1])
vp := util.NewValueParser(value)
switch strings.TrimSpace(key) {
case "async":
b, err := strconv.ParseBool(value)
if err == nil {
newCryptoElem.Async = b
}
case "blocksize":
newCryptoElem.Blocksize = vp.PUInt64()
case "chunksize":
newCryptoElem.Chunksize = vp.PUInt64()
case "digestsize":
newCryptoElem.Digestsize = vp.PUInt64()
case "driver":
newCryptoElem.Driver = value
case "geniv":
newCryptoElem.Geniv = value
case "internal":
newCryptoElem.Internal = value
case "ivsize":
newCryptoElem.Ivsize = vp.PUInt64()
case "maxauthsize":
newCryptoElem.Maxauthsize = vp.PUInt64()
case "max keysize":
newCryptoElem.MaxKeysize = vp.PUInt64()
case "min keysize":
newCryptoElem.MinKeysize = vp.PUInt64()
case "module":
newCryptoElem.Module = value
case "name":
newCryptoElem.Name = value
case "priority":
newCryptoElem.Priority = vp.PInt64()
case "refcnt":
newCryptoElem.Refcnt = vp.PInt64()
case "seedsize":
newCryptoElem.Seedsize = vp.PUInt64()
case "selftest":
newCryptoElem.Selftest = value
case "type":
newCryptoElem.Type = value
case "walksize":
newCryptoElem.Walksize = vp.PUInt64()
}
}
crypto = append(crypto, newCryptoElem)
}
return crypto, nil
}
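Most Crypto fields are pointers so that absent keys stay distinguishable from zero values. A minimal consumer sketch, again assuming the NewFS constructor from fs.go:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range entries {
		// Pointer fields are nil when the key was missing for this algorithm.
		if c.Priority != nil {
			fmt.Printf("%s (driver %s) priority=%d\n", c.Name, c.Driver, *c.Priority)
		}
	}
}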

File diff suppressed because it is too large


@ -1,3 +1,8 @@
module github.com/prometheus/procfs
require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
go 1.12
require (
github.com/google/go-cmp v0.3.1
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
)


@ -1,2 +1,4 @@
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=


@ -25,6 +25,9 @@ const (
// DefaultSysMountPoint is the common mount point of the sys filesystem.
DefaultSysMountPoint = "/sys"
// DefaultConfigfsMountPoint is the common mount point of the configfs
DefaultConfigfsMountPoint = "/sys/kernel/config"
)
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an


@ -0,0 +1,88 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"io/ioutil"
"strconv"
"strings"
)
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
// ParseUint64s parses a slice of strings into a slice of uint64s.
func ParseUint64s(ss []string) ([]uint64, error) {
us := make([]uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, u)
}
return us, nil
}
// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
func ParsePInt64s(ss []string) ([]*int64, error) {
us := make([]*int64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, &u)
}
return us, nil
}
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}
// ParseBool parses a string into a boolean pointer.
func ParseBool(b string) *bool {
var truth bool
switch b {
case "enabled":
truth = true
case "disabled":
truth = false
default:
return nil
}
return &truth
}
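ParseBool maps the strings "enabled" and "disabled" to a *bool and anything else to nil, giving callers a tri-state result. A hypothetical example test that could sit beside this file (internal packages are importable only from within this module, so the sketch is in-package):

package util

import "fmt"

func ExampleParseBool() {
	for _, s := range []string{"enabled", "disabled", "bogus"} {
		if b := ParseBool(s); b != nil {
			fmt.Println(s, "->", *b)
		} else {
			fmt.Println(s, "-> nil (unrecognized)")
		}
	}
	// Output:
	// enabled -> true
	// disabled -> false
	// bogus -> nil (unrecognized)
}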


@ -0,0 +1,38 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"io"
"io/ioutil"
"os"
)
// ReadFileNoStat uses ioutil.ReadAll to read the contents of an entire file.
// This is similar to ioutil.ReadFile but without the call to os.Stat, because
// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
// Reads a max file size of 512kB. For files larger than this, a scanner
// should be used.
func ReadFileNoStat(filename string) ([]byte, error) {
const maxBufferSize = 1024 * 512
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
reader := io.LimitReader(f, maxBufferSize)
return ioutil.ReadAll(reader)
}
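The point of ReadFileNoStat is to avoid trusting the stat-reported size, which is often 0 or 4096 for procfs and sysfs entries. A self-contained sketch of the same limited-read pattern using only the standard library (readNoStat is an illustrative stand-in name, not part of this package):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
)

// readNoStat mirrors ReadFileNoStat: open, cap the read with LimitReader,
// and let ReadAll grow the buffer instead of pre-sizing it via os.Stat.
func readNoStat(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return ioutil.ReadAll(io.LimitReader(f, 1024*512))
}

func main() {
	// stat reports a size of 0 for this file, yet it has content.
	b, err := readNoStat("/proc/loadavg")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(b))
}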


@ -0,0 +1,48 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,!appengine
package util
import (
"bytes"
"os"
"syscall"
)
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
//
// Note that this function will not read files larger than 128 bytes.
func SysReadFile(file string) (string, error) {
f, err := os.Open(file)
if err != nil {
return "", err
}
defer f.Close()
// On some machines, hwmon drivers are broken and return EAGAIN. This causes
// Go's ioutil.ReadFile implementation to poll forever.
//
// Since we either want to read data or bail immediately, do the simplest
// possible read using syscall directly.
const sysFileBufferSize = 128
b := make([]byte, sysFileBufferSize)
n, err := syscall.Read(int(f.Fd()), b)
if err != nil {
return "", err
}
return string(bytes.TrimSpace(b[:n])), nil
}


@ -0,0 +1,26 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,appengine !linux
package util
import (
"fmt"
)
// SysReadFile is implemented as a noop for builds that do not support
// the read syscall, for example Windows or Linux on Google App Engine.
func SysReadFile(file string) (string, error) {
return "", fmt.Errorf("not supported on this platform")
}


@ -0,0 +1,91 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"strconv"
)
// TODO(mdlayher): util packages are an anti-pattern and this should be moved
// somewhere else that is more focused in the future.
// A ValueParser enables parsing a single string into a variety of data types
// in a concise and safe way. The Err method must be invoked after invoking
// any other methods to ensure a value was successfully parsed.
type ValueParser struct {
v string
err error
}
// NewValueParser creates a ValueParser using the input string.
func NewValueParser(v string) *ValueParser {
return &ValueParser{v: v}
}
// Int interprets the underlying value as an int and returns that value.
func (vp *ValueParser) Int() int { return int(vp.int64()) }
// PInt64 interprets the underlying value as an int64 and returns a pointer to
// that value.
func (vp *ValueParser) PInt64() *int64 {
if vp.err != nil {
return nil
}
v := vp.int64()
return &v
}
// int64 interprets the underlying value as an int64 and returns that value.
// TODO: export if/when necessary.
func (vp *ValueParser) int64() int64 {
if vp.err != nil {
return 0
}
// A base value of zero makes ParseInt infer the correct base using the
// string's prefix, if any.
const base = 0
v, err := strconv.ParseInt(vp.v, base, 64)
if err != nil {
vp.err = err
return 0
}
return v
}
// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
// that value.
func (vp *ValueParser) PUInt64() *uint64 {
if vp.err != nil {
return nil
}
// A base value of zero makes ParseInt infer the correct base using the
// string's prefix, if any.
const base = 0
v, err := strconv.ParseUint(vp.v, base, 64)
if err != nil {
vp.err = err
return nil
}
return &v
}
// Err returns the last error, if any, encountered by the ValueParser.
func (vp *ValueParser) Err() error {
return vp.err
}
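The intended call pattern is to chain one or more accessors and then check Err once. A hypothetical in-package example test (internal/util cannot be imported from outside the module):

package util

import "fmt"

func ExampleValueParser() {
	vp := NewValueParser("0x20")
	n := vp.Int()     // base 0, so the 0x prefix selects hexadecimal
	p := vp.PUInt64() // pointer variant; returns nil once an error has occurred
	if err := vp.Err(); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(n, *p)
	// Output:
	// 32 32
}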


@ -15,6 +15,7 @@ package procfs
import (
"bufio"
"bytes"
"encoding/hex"
"errors"
"fmt"
@ -24,6 +25,8 @@ import (
"os"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
@ -64,17 +67,16 @@ type IPVSBackendStatus struct {
// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) IPVSStats() (IPVSStats, error) {
file, err := os.Open(fs.proc.Path("net/ip_vs_stats"))
data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
if err != nil {
return IPVSStats{}, err
}
defer file.Close()
return parseIPVSStats(file)
return parseIPVSStats(bytes.NewReader(data))
}
// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
func parseIPVSStats(file io.Reader) (IPVSStats, error) {
func parseIPVSStats(r io.Reader) (IPVSStats, error) {
var (
statContent []byte
statLines []string
@ -82,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
stats IPVSStats
)
statContent, err := ioutil.ReadAll(file)
statContent, err := ioutil.ReadAll(r)
if err != nil {
return IPVSStats{}, err
}


@ -22,8 +22,8 @@ import (
)
var (
statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
)
// MDStat holds info parsed from /proc/mdstat.
@ -34,8 +34,12 @@ type MDStat struct {
ActivityState string
// Number of active disks.
DisksActive int64
// Total number of disks the device consists of.
// Total number of disks the device requires.
DisksTotal int64
// Number of failed disks.
DisksFailed int64
// Spare disks in the device.
DisksSpare int64
// Number of blocks the device holds.
BlocksTotal int64
// Number of blocks on the device that are in sync.
@ -59,29 +63,38 @@ func (fs FS) MDStat() ([]MDStat, error) {
// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
// structs containing the relevant info.
func parseMDStat(mdstatData []byte) ([]MDStat, error) {
func parseMDStat(mdStatData []byte) ([]MDStat, error) {
mdStats := []MDStat{}
lines := strings.Split(string(mdstatData), "\n")
for i, l := range lines {
if strings.TrimSpace(l) == "" || l[0] == ' ' ||
strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
lines := strings.Split(string(mdStatData), "\n")
for i, line := range lines {
if strings.TrimSpace(line) == "" || line[0] == ' ' ||
strings.HasPrefix(line, "Personalities") ||
strings.HasPrefix(line, "unused") {
continue
}
deviceFields := strings.Fields(l)
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", l)
return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
}
mdName := deviceFields[0]
activityState := deviceFields[2]
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
return mdStats, fmt.Errorf("missing lines for md device %s", mdName)
return nil, fmt.Errorf(
"error parsing %s: too few lines for md device",
mdName,
)
}
active, total, size, err := evalStatusLine(lines[i+1])
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
fail := int64(strings.Count(line, "(F)"))
spare := int64(strings.Count(line, "(S)"))
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
return nil, err
return nil, fmt.Errorf("error parsing md device lines: %s", err)
}
syncLineIdx := i + 2
@ -89,20 +102,38 @@ func parseMDStat(mdstatData []byte) ([]MDStat, error) {
syncLineIdx++
}
// If device is recovering/syncing at the moment, get the number of currently
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
if strings.Contains(lines[syncLineIdx], "recovery") || strings.Contains(lines[syncLineIdx], "resync") {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, err
recovering := strings.Contains(lines[syncLineIdx], "recovery")
resyncing := strings.Contains(lines[syncLineIdx], "resync")
// Append recovery and resyncing state info.
if recovering || resyncing {
if recovering {
state = "recovering"
} else {
state = "resyncing"
}
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
syncedBlocks = 0
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
}
}
}
mdStats = append(mdStats, MDStat{
Name: mdName,
ActivityState: activityState,
ActivityState: state,
DisksActive: active,
DisksFailed: fail,
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
@ -112,39 +143,51 @@ func parseMDStat(mdstatData []byte) ([]MDStat, error) {
return mdStats, nil
}
func evalStatusLine(statusline string) (active, total, size int64, err error) {
matches := statuslineRE.FindStringSubmatch(statusline)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
}
size, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
// In the device line, only disks have a number associated with them in [].
total = int64(strings.Count(deviceLine, "["))
return total, total, size, nil
}
if strings.Contains(deviceLine, "inactive") {
return 0, 0, size, nil
}
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
}
return active, total, size, nil
}
func evalRecoveryLine(buildline string) (syncedBlocks int64, err error) {
matches := buildlineRE.FindStringSubmatch(buildline)
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, fmt.Errorf("unexpected buildline: %s", buildline)
return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
}
return syncedBlocks, nil
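A minimal sketch of consuming the reworked MDStat API, assuming the NewFS constructor from fs.go; the slice is empty on hosts without md devices:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	mds, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mds {
		// ActivityState is now one of active/inactive/recovering/resyncing.
		fmt.Printf("%s: %s, %d/%d disks active, %d failed, %d spare\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.DisksFailed, md.DisksSpare)
	}
}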

vendor/github.com/prometheus/procfs/meminfo.go generated vendored Normal file

@ -0,0 +1,277 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Meminfo represents memory statistics.
type Meminfo struct {
// Total usable ram (i.e. physical ram minus a few reserved
// bits and the kernel binary code)
MemTotal uint64
// The sum of LowFree+HighFree
MemFree uint64
// An estimate of how much memory is available for starting
// new applications, without swapping. Calculated from
// MemFree, SReclaimable, the size of the file LRU lists, and
// the low watermarks in each zone. The estimate takes into
// account that the system needs some page cache to function
// well, and that not all reclaimable slab will be
// reclaimable, due to items being in use. The impact of those
// factors will vary from system to system.
MemAvailable uint64
// Relatively temporary storage for raw disk blocks shouldn't
// get tremendously large (20MB or so)
Buffers uint64
Cached uint64
// Memory that once was swapped out, is swapped back in but
// still also is in the swapfile (if memory is needed it
// doesn't need to be swapped out AGAIN because it is already
// in the swapfile. This saves I/O)
SwapCached uint64
// Memory that has been used more recently and usually not
// reclaimed unless absolutely necessary.
Active uint64
// Memory which has been less recently used. It is more
// eligible to be reclaimed for other purposes
Inactive uint64
ActiveAnon uint64
InactiveAnon uint64
ActiveFile uint64
InactiveFile uint64
Unevictable uint64
Mlocked uint64
// total amount of swap space available
SwapTotal uint64
// Memory which has been evicted from RAM, and is temporarily
// on the disk
SwapFree uint64
// Memory which is waiting to get written back to the disk
Dirty uint64
// Memory which is actively being written back to the disk
Writeback uint64
// Non-file backed pages mapped into userspace page tables
AnonPages uint64
// files which have been mapped, such as libraries
Mapped uint64
Shmem uint64
// in-kernel data structures cache
Slab uint64
// Part of Slab, that might be reclaimed, such as caches
SReclaimable uint64
// Part of Slab, that cannot be reclaimed on memory pressure
SUnreclaim uint64
KernelStack uint64
// amount of memory dedicated to the lowest level of page
// tables.
PageTables uint64
// NFS pages sent to the server, but not yet committed to
// stable storage
NFSUnstable uint64
// Memory used for block device "bounce buffers"
Bounce uint64
// Memory used by FUSE for temporary writeback buffers
WritebackTmp uint64
// Based on the overcommit ratio ('vm.overcommit_ratio'),
// this is the total amount of memory currently available to
// be allocated on the system. This limit is only adhered to
// if strict overcommit accounting is enabled (mode 2 in
// 'vm.overcommit_memory').
// The CommitLimit is calculated with the following formula:
// CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
// overcommit_ratio / 100 + [total swap pages]
// For example, on a system with 1G of physical RAM and 7G
// of swap with a `vm.overcommit_ratio` of 30 it would
// yield a CommitLimit of 7.3G.
// For more details, see the memory overcommit documentation
// in vm/overcommit-accounting.
CommitLimit uint64
// The amount of memory presently allocated on the system.
// The committed memory is a sum of all of the memory which
// has been allocated by processes, even if it has not been
// "used" by them as of yet. A process which malloc()'s 1G
// of memory, but only touches 300M of it will show up as
// using 1G. This 1G is memory which has been "committed" to
// by the VM and can be used at any time by the allocating
// application. With strict overcommit enabled on the system
// (mode 2 in 'vm.overcommit_memory'),allocations which would
// exceed the CommitLimit (detailed above) will not be permitted.
// This is useful if one needs to guarantee that processes will
// not fail due to lack of memory once that memory has been
// successfully allocated.
CommittedAS uint64
// total size of vmalloc memory area
VmallocTotal uint64
// amount of vmalloc area which is used
VmallocUsed uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk uint64
HardwareCorrupted uint64
AnonHugePages uint64
ShmemHugePages uint64
ShmemPmdMapped uint64
CmaTotal uint64
CmaFree uint64
HugePagesTotal uint64
HugePagesFree uint64
HugePagesRsvd uint64
HugePagesSurp uint64
Hugepagesize uint64
DirectMap4k uint64
DirectMap2M uint64
DirectMap1G uint64
}
// Meminfo returns information about current kernel/system memory statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) Meminfo() (Meminfo, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
if err != nil {
return Meminfo{}, err
}
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
}
return *m, nil
}
func parseMemInfo(r io.Reader) (*Meminfo, error) {
var m Meminfo
s := bufio.NewScanner(r)
for s.Scan() {
// Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
}
v, err := strconv.ParseUint(fields[1], 0, 64)
if err != nil {
return nil, err
}
switch fields[0] {
case "MemTotal:":
m.MemTotal = v
case "MemFree:":
m.MemFree = v
case "MemAvailable:":
m.MemAvailable = v
case "Buffers:":
m.Buffers = v
case "Cached:":
m.Cached = v
case "SwapCached:":
m.SwapCached = v
case "Active:":
m.Active = v
case "Inactive:":
m.Inactive = v
case "Active(anon):":
m.ActiveAnon = v
case "Inactive(anon):":
m.InactiveAnon = v
case "Active(file):":
m.ActiveFile = v
case "Inactive(file):":
m.InactiveFile = v
case "Unevictable:":
m.Unevictable = v
case "Mlocked:":
m.Mlocked = v
case "SwapTotal:":
m.SwapTotal = v
case "SwapFree:":
m.SwapFree = v
case "Dirty:":
m.Dirty = v
case "Writeback:":
m.Writeback = v
case "AnonPages:":
m.AnonPages = v
case "Mapped:":
m.Mapped = v
case "Shmem:":
m.Shmem = v
case "Slab:":
m.Slab = v
case "SReclaimable:":
m.SReclaimable = v
case "SUnreclaim:":
m.SUnreclaim = v
case "KernelStack:":
m.KernelStack = v
case "PageTables:":
m.PageTables = v
case "NFS_Unstable:":
m.NFSUnstable = v
case "Bounce:":
m.Bounce = v
case "WritebackTmp:":
m.WritebackTmp = v
case "CommitLimit:":
m.CommitLimit = v
case "Committed_AS:":
m.CommittedAS = v
case "VmallocTotal:":
m.VmallocTotal = v
case "VmallocUsed:":
m.VmallocUsed = v
case "VmallocChunk:":
m.VmallocChunk = v
case "HardwareCorrupted:":
m.HardwareCorrupted = v
case "AnonHugePages:":
m.AnonHugePages = v
case "ShmemHugePages:":
m.ShmemHugePages = v
case "ShmemPmdMapped:":
m.ShmemPmdMapped = v
case "CmaTotal:":
m.CmaTotal = v
case "CmaFree:":
m.CmaFree = v
case "HugePages_Total:":
m.HugePagesTotal = v
case "HugePages_Free:":
m.HugePagesFree = v
case "HugePages_Rsvd:":
m.HugePagesRsvd = v
case "HugePages_Surp:":
m.HugePagesSurp = v
case "Hugepagesize:":
m.Hugepagesize = v
case "DirectMap4k:":
m.DirectMap4k = v
case "DirectMap2M:":
m.DirectMap2M = v
case "DirectMap1G:":
m.DirectMap1G = v
}
}
return &m, nil
}
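A minimal consumer sketch for the Meminfo API above (NewFS from fs.go assumed; values are taken verbatim from the file, which reports most fields in kB):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	m, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("MemTotal=%d MemFree=%d MemAvailable=%d (kB)\n",
		m.MemTotal, m.MemFree, m.MemAvailable)
}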

vendor/github.com/prometheus/procfs/mountinfo.go generated vendored Normal file

@ -0,0 +1,180 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// A MountInfo describes the details and options of each mount,
// parsed from /proc/self/mountinfo.
// The fields of each mountinfo entry are described in the proc(5) man page:
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
// Unique Id for the mount
MountId int
// The Id of the parent mount
ParentId int
// The value of `st_dev` for the files on this FS
MajorMinorVer string
// The pathname of the directory in the FS that forms
// the root for this mount
Root string
// The pathname of the mount point relative to the root
MountPoint string
// Mount options
Options map[string]string
// Zero or more optional fields
OptionalFields map[string]string
// The Filesystem type
FSType string
// FS specific information or "none"
Source string
// Superblock options
SuperOptions map[string]string
}
// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
func parseMountInfo(info []byte) ([]*MountInfo, error) {
mounts := []*MountInfo{}
scanner := bufio.NewScanner(bytes.NewReader(info))
for scanner.Scan() {
mountString := scanner.Text()
parsedMounts, err := parseMountInfoString(mountString)
if err != nil {
return nil, err
}
mounts = append(mounts, parsedMounts)
}
err := scanner.Err()
return mounts, err
}
// Parses a mountinfo file line, and converts it to a MountInfo struct.
// An important check is for the hyphen separator: if it is missing,
// the line is malformed.
func parseMountInfoString(mountString string) (*MountInfo, error) {
var err error
mountInfo := strings.Split(mountString, " ")
mountInfoLength := len(mountInfo)
if mountInfoLength < 11 {
return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
}
if mountInfo[mountInfoLength-4] != "-" {
return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
}
mount := &MountInfo{
MajorMinorVer: mountInfo[2],
Root: mountInfo[3],
MountPoint: mountInfo[4],
Options: mountOptionsParser(mountInfo[5]),
OptionalFields: nil,
FSType: mountInfo[mountInfoLength-3],
Source: mountInfo[mountInfoLength-2],
SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
}
mount.MountId, err = strconv.Atoi(mountInfo[0])
if err != nil {
return nil, fmt.Errorf("failed to parse mount ID")
}
mount.ParentId, err = strconv.Atoi(mountInfo[1])
if err != nil {
return nil, fmt.Errorf("failed to parse parent ID")
}
// Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7
if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil {
return nil, err
}
}
return mount, nil
}
// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
func mountOptionsIsValidField(s string) bool {
switch s {
case
"shared",
"master",
"propagate_from",
"unbindable":
return true
}
return false
}
// mountOptionsParseOptionalFields parses a list of optional field strings into a map of strings.
func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
optionalFields := make(map[string]string)
for _, field := range o {
optionSplit := strings.SplitN(field, ":", 2)
value := ""
if len(optionSplit) == 2 {
value = optionSplit[1]
}
if mountOptionsIsValidField(optionSplit[0]) {
optionalFields[optionSplit[0]] = value
}
}
return optionalFields, nil
}
// Parses mount options (and superblock options) into a key/value map.
func mountOptionsParser(mountOptions string) map[string]string {
opts := make(map[string]string)
options := strings.Split(mountOptions, ",")
for _, opt := range options {
splitOption := strings.Split(opt, "=")
if len(splitOption) < 2 {
key := splitOption[0]
opts[key] = ""
} else {
key, value := splitOption[0], splitOption[1]
opts[key] = value
}
}
return opts
}
// Retrieves mountinfo information from `/proc/self/mountinfo`.
func GetMounts() ([]*MountInfo, error) {
data, err := util.ReadFileNoStat("/proc/self/mountinfo")
if err != nil {
return nil, err
}
return parseMountInfo(data)
}
// Retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
func GetProcMounts(pid int) ([]*MountInfo, error) {
data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
return nil, err
}
return parseMountInfo(data)
}
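GetMounts and GetProcMounts are package-level entry points, so no FS handle is needed; a minimal sketch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	mounts, err := procfs.GetMounts() // parses /proc/self/mountinfo
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		fmt.Printf("%s on %s type %s (id %d, parent %d)\n",
			m.Source, m.MountPoint, m.FSType, m.MountId, m.ParentId)
	}
}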


@ -183,7 +183,6 @@ func (netDev NetDev) Total() NetDevLine {
names = append(names, ifc.Name)
total.RxBytes += ifc.RxBytes
total.RxPackets += ifc.RxPackets
total.RxPackets += ifc.RxPackets
total.RxErrors += ifc.RxErrors
total.RxDropped += ifc.RxDropped
total.RxFIFO += ifc.RxFIFO

vendor/github.com/prometheus/procfs/net_sockstat.go generated vendored Normal file

@ -0,0 +1,163 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
// respectively.
type NetSockstat struct {
// Used is non-nil for IPv4 sockstat results, but nil for IPv6.
Used *int
Protocols []NetSockstatProtocol
}
// A NetSockstatProtocol contains statistics about a given socket protocol.
// Pointer fields indicate that the value may or may not be present on any
// given protocol.
type NetSockstatProtocol struct {
Protocol string
InUse int
Orphan *int
TW *int
Alloc *int
Mem *int
Memory *int
}
// NetSockstat retrieves IPv4 socket statistics.
func (fs FS) NetSockstat() (*NetSockstat, error) {
return readSockstat(fs.proc.Path("net", "sockstat"))
}
// NetSockstat6 retrieves IPv6 socket statistics.
//
// If IPv6 is disabled on this kernel, the returned error can be checked with
// os.IsNotExist.
func (fs FS) NetSockstat6() (*NetSockstat, error) {
return readSockstat(fs.proc.Path("net", "sockstat6"))
}
// readSockstat opens and parses a NetSockstat from the input file.
func readSockstat(name string) (*NetSockstat, error) {
// This file is small and can be read with one syscall.
b, err := util.ReadFileNoStat(name)
if err != nil {
// Do not wrap this error so the caller can detect os.IsNotExist and
// similar conditions.
return nil, err
}
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err)
}
return stat, nil
}
// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
func parseSockstat(r io.Reader) (*NetSockstat, error) {
var stat NetSockstat
s := bufio.NewScanner(r)
for s.Scan() {
// Expect a minimum of a protocol and one key/value pair.
fields := strings.Split(s.Text(), " ")
if len(fields) < 3 {
return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
}
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.
proto := strings.TrimSuffix(fields[0], ":")
switch proto {
case "sockets":
// Special case: IPv4 has a sockets "used" key/value pair that we
// embed at the top level of the structure.
used := kvs["used"]
stat.Used = &used
default:
// Parse all other lines as individual protocols.
nsp := parseSockstatProtocol(kvs)
nsp.Protocol = proto
stat.Protocols = append(stat.Protocols, nsp)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return &stat, nil
}
// parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) {
if len(kvs)%2 != 0 {
return nil, errors.New("odd number of fields in key/value pairs")
}
// Iterate two values at a time to gather key/value pairs.
out := make(map[string]int, len(kvs)/2)
for i := 0; i < len(kvs); i += 2 {
vp := util.NewValueParser(kvs[i+1])
out[kvs[i]] = vp.Int()
if err := vp.Err(); err != nil {
return nil, err
}
}
return out, nil
}
// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
var nsp NetSockstatProtocol
for k, v := range kvs {
// Capture the range variable to ensure we get unique pointers for
// each of the optional fields.
v := v
switch k {
case "inuse":
nsp.InUse = v
case "orphan":
nsp.Orphan = &v
case "tw":
nsp.TW = &v
case "alloc":
nsp.Alloc = &v
case "mem":
nsp.Mem = &v
case "memory":
nsp.Memory = &v
}
}
return nsp
}
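A minimal consumer sketch for the sockstat API (NewFS from fs.go assumed); note the IPv4/IPv6 asymmetry around the Used field:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NetSockstat() // use NetSockstat6 for the IPv6 counters
	if err != nil {
		log.Fatal(err)
	}
	if stat.Used != nil { // nil for IPv6 results
		fmt.Println("sockets used:", *stat.Used)
	}
	for _, p := range stat.Protocols {
		fmt.Printf("%s inuse=%d\n", p.Protocol, p.InUse)
	}
}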

vendor/github.com/prometheus/procfs/net_softnet.go generated vendored Normal file

@ -0,0 +1,91 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
)
// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
type SoftnetEntry struct {
// Number of processed packets
Processed uint
// Number of dropped packets
Dropped uint
// Number of times processing packets ran out of quota
TimeSqueezed uint
}
// GatherSoftnetStats reads /proc/net/softnet_stat, parses the relevant columns,
// and returns a slice of SoftnetEntry structs.
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
if err != nil {
return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
}
return parseSoftnetEntries(data)
}
func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]SoftnetEntry, 0)
var err error
const (
expectedColumns = 11
)
for _, line := range lines {
columns := strings.Fields(line)
width := len(columns)
if width == 0 {
continue
}
if width != expectedColumns {
return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
}
var entry SoftnetEntry
if entry, err = parseSoftnetEntry(columns); err != nil {
return []SoftnetEntry{}, err
}
entries = append(entries, entry)
}
return entries, nil
}
func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
var err error
var processed, dropped, timeSqueezed uint64
if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
}
if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
}
if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
}
return SoftnetEntry{
Processed: uint(processed),
Dropped: uint(dropped),
TimeSqueezed: uint(timeSqueezed),
}, nil
}
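Each row of /proc/net/softnet_stat describes one CPU, so the slice index doubles as the CPU number. A minimal sketch (NewFS from fs.go assumed):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherSoftnetStats()
	if err != nil {
		log.Fatal(err)
	}
	for i, e := range entries {
		fmt.Printf("cpu%d processed=%d dropped=%d squeezed=%d\n",
			i, e.Processed, e.Dropped, e.TimeSqueezed)
	}
}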


@ -207,10 +207,6 @@ func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseProtocol(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
typ, err := strconv.ParseUint(hexStr, 16, 16)
if err != nil {


@ -22,6 +22,7 @@ import (
"strings"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
// Proc provides information about a running process.
@ -121,13 +122,7 @@ func (fs FS) AllProcs() (Procs, error) {
// CmdLine returns the command line of a process.
func (p Proc) CmdLine() ([]string, error) {
f, err := os.Open(p.path("cmdline"))
if err != nil {
return nil, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("cmdline"))
if err != nil {
return nil, err
}
@ -141,13 +136,7 @@ func (p Proc) CmdLine() ([]string, error) {
// Comm returns the command name of a process.
func (p Proc) Comm() (string, error) {
f, err := os.Open(p.path("comm"))
if err != nil {
return "", err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("comm"))
if err != nil {
return "", err
}
@ -247,6 +236,18 @@ func (p Proc) MountStats() ([]*Mount, error) {
return parseMountStats(f)
}
// MountInfo retrieves mount information for mount points in a
// process's namespace.
// It supplies information missing in `/proc/self/mounts` and
// fixes various other problems with that file too.
func (p Proc) MountInfo() ([]*MountInfo, error) {
data, err := util.ReadFileNoStat(p.path("mountinfo"))
if err != nil {
return nil, err
}
return parseMountInfo(data)
}
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {
@ -265,3 +266,33 @@ func (p Proc) fileDescriptors() ([]string, error) {
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
// FileDescriptorsInfo retrieves information about all file descriptors of
// the process.
func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
names, err := p.fileDescriptors()
if err != nil {
return nil, err
}
var fdinfos ProcFDInfos
for _, n := range names {
fdinfo, err := p.FDInfo(n)
if err != nil {
continue
}
fdinfos = append(fdinfos, *fdinfo)
}
return fdinfos, nil
}
// Schedstat returns task scheduling information for the process.
func (p Proc) Schedstat() (ProcSchedstat, error) {
contents, err := ioutil.ReadFile(p.path("schedstat"))
if err != nil {
return ProcSchedstat{}, err
}
return parseProcSchedstat(string(contents))
}

vendor/github.com/prometheus/procfs/proc_environ.go generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Environ reads the process environment from /proc/<pid>/environ.
func (p Proc) Environ() ([]string, error) {
environments := make([]string, 0)
data, err := util.ReadFileNoStat(p.path("environ"))
if err != nil {
return environments, err
}
environments = strings.Split(string(data), "\000")
if len(environments) > 0 {
environments = environments[:len(environments)-1]
}
return environments, nil
}
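A minimal sketch of reading a process environment; it assumes the Self helper defined in proc.go (not shown in this hunk), which resolves /proc/self:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	env, err := p.Environ() // NUL-separated entries, already split
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range env {
		if strings.HasPrefix(kv, "PATH=") {
			fmt.Println(kv)
		}
	}
}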

vendor/github.com/prometheus/procfs/proc_fdinfo.go generated vendored Normal file

@ -0,0 +1,125 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"regexp"
"github.com/prometheus/procfs/internal/util"
)
// Regexp variables
var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
)
// ProcFDInfo represents file descriptor information.
type ProcFDInfo struct {
// File descriptor
FD string
// File offset
Pos string
// File access mode and status flags
Flags string
// Mount point ID
MntID string
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo
}
// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
if err != nil {
return nil, err
}
var text, pos, flags, mntid string
var inotify []InotifyInfo
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
text = scanner.Text()
if rPos.MatchString(text) {
pos = rPos.FindStringSubmatch(text)[1]
} else if rFlags.MatchString(text) {
flags = rFlags.FindStringSubmatch(text)[1]
} else if rMntID.MatchString(text) {
mntid = rMntID.FindStringSubmatch(text)[1]
} else if rInotify.MatchString(text) {
newInotify, err := parseInotifyInfo(text)
if err != nil {
return nil, err
}
inotify = append(inotify, *newInotify)
}
}
i := &ProcFDInfo{
FD: fd,
Pos: pos,
Flags: flags,
MntID: mntid,
InotifyInfos: inotify,
}
return i, nil
}
// InotifyInfo represents a single inotify line in the fdinfo file.
type InotifyInfo struct {
// Watch descriptor number
WD string
// Inode number
Ino string
// Device ID
Sdev string
// Mask of events being monitored
Mask string
}
// InotifyInfo constructor. Only available on kernel 3.8+.
func parseInotifyInfo(line string) (*InotifyInfo, error) {
r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
m := r.FindStringSubmatch(line)
if m == nil {
// Guard: indexing the submatches below would panic on a non-matching line.
return nil, fmt.Errorf("invalid inotify line %q", line)
}
i := &InotifyInfo{
WD: m[1],
Ino: m[2],
Sdev: m[3],
Mask: m[4],
}
return i, nil
}
// ProcFDInfos represents a list of ProcFDInfo structs.
type ProcFDInfos []ProcFDInfo
func (p ProcFDInfos) Len() int { return len(p) }
func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
// InotifyWatchLen returns the total number of inotify watches
func (p ProcFDInfos) InotifyWatchLen() (int, error) {
length := 0
for _, f := range p {
length += len(f.InotifyInfos)
}
return length, nil
}
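A minimal sketch tying FileDescriptorsInfo and InotifyWatchLen together; Self is assumed from proc.go, and sort.Sort works because ProcFDInfos implements sort.Interface:

package main

import (
	"fmt"
	"log"
	"sort"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	fdinfos, err := p.FileDescriptorsInfo()
	if err != nil {
		log.Fatal(err)
	}
	sort.Sort(fdinfos) // orders by FD string
	watches, _ := fdinfos.InotifyWatchLen()
	fmt.Printf("%d fds, %d inotify watches\n", len(fdinfos), watches)
}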


@ -15,8 +15,8 @@ package procfs
import (
"fmt"
"io/ioutil"
"os"
"github.com/prometheus/procfs/internal/util"
)
// ProcIO models the content of /proc/<pid>/io.
@ -43,13 +43,7 @@ type ProcIO struct {
func (p Proc) IO() (ProcIO, error) {
pio := ProcIO{}
f, err := os.Open(p.path("io"))
if err != nil {
return pio, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("io"))
if err != nil {
return pio, err
}


@ -24,11 +24,13 @@ package procfs
// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/prometheus/procfs/internal/util"
)
const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
@ -55,24 +57,21 @@ type PSIStats struct {
// resource from /proc/pressure/<resource>. At time of writing this can be
// either "cpu", "memory" or "io".
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
}
defer file.Close()
return parsePSIStats(resource, file)
return parsePSIStats(resource, bytes.NewReader(data))
}
// parsePSIStats parses the specified file for pressure stall information
func parsePSIStats(resource string, file io.Reader) (PSIStats, error) {
func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
stats, err := ioutil.ReadAll(file)
if err != nil {
return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource)
}
for _, l := range strings.Split(string(stats), "\n") {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
l := scanner.Text()
prefix := strings.Split(l, " ")[0]
switch prefix {
case "some":


@ -16,10 +16,10 @@ package procfs
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
@ -106,20 +106,14 @@ type ProcStat struct {
// NewStat returns the current status information of the process.
//
// Deprecated: use NewStat() instead
// Deprecated: use p.Stat() instead
func (p Proc) NewStat() (ProcStat, error) {
return p.Stat()
}
// Stat returns the current status information of the process.
func (p Proc) Stat() (ProcStat, error) {
f, err := os.Open(p.path("stat"))
if err != nil {
return ProcStat{}, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("stat"))
if err != nil {
return ProcStat{}, err
}


@ -15,13 +15,13 @@ package procfs
import (
"bytes"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ProcStat provides status information about the process,
// ProcStatus provides status information about the process,
// read from /proc/[pid]/status.
type ProcStatus struct {
// The process ID.
@ -29,6 +29,9 @@ type ProcStatus struct {
// The process name.
Name string
// Thread group ID.
TGID int
// Peak virtual memory size.
VmPeak uint64
// Virtual memory size.
@ -72,13 +75,7 @@ type ProcStatus struct {
// NewStatus returns the current status information of the process.
func (p Proc) NewStatus() (ProcStatus, error) {
f, err := os.Open(p.path("status"))
if err != nil {
return ProcStatus{}, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("status"))
if err != nil {
return ProcStatus{}, err
}
@ -113,6 +110,8 @@ func (p Proc) NewStatus() (ProcStatus, error) {
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
switch k {
case "Tgid":
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "VmPeak":

vendor/github.com/prometheus/procfs/schedstat.go generated vendored Normal file

@ -0,0 +1,118 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"errors"
"os"
"regexp"
"strconv"
)
var (
cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
)
// Schedstat contains scheduler statistics from /proc/schedstat
//
// See
// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
// for a detailed description of what these numbers mean.
//
// Note the current kernel documentation claims some of the time units are in
// jiffies when they are actually in nanoseconds since 2.6.23 with the
// introduction of CFS. A fix to the documentation is pending. See
// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
type Schedstat struct {
CPUs []*SchedstatCPU
}
// SchedstatCPU contains the values from one "cpu<N>" line
type SchedstatCPU struct {
CPUNum string
RunningNanoseconds uint64
WaitingNanoseconds uint64
RunTimeslices uint64
}
// ProcSchedstat contains the values from /proc/<pid>/schedstat
type ProcSchedstat struct {
RunningNanoseconds uint64
WaitingNanoseconds uint64
RunTimeslices uint64
}
// Schedstat reads data from /proc/schedstat
func (fs FS) Schedstat() (*Schedstat, error) {
file, err := os.Open(fs.proc.Path("schedstat"))
if err != nil {
return nil, err
}
defer file.Close()
stats := &Schedstat{}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
match := cpuLineRE.FindStringSubmatch(scanner.Text())
if match != nil {
cpu := &SchedstatCPU{}
cpu.CPUNum = match[1]
cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
if err != nil {
continue
}
cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
if err != nil {
continue
}
cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
if err != nil {
continue
}
stats.CPUs = append(stats.CPUs, cpu)
}
}
return stats, nil
}
func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) {
match := procLineRE.FindStringSubmatch(contents)
if match != nil {
stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
if err != nil {
return
}
stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
if err != nil {
return
}
stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
return
}
err = errors.New("could not parse schedstat")
return
}
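A minimal sketch of reading system-wide scheduler statistics (NewFS from fs.go assumed); per-process figures come from Proc.Schedstat instead:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	s, err := fs.Schedstat()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range s.CPUs {
		fmt.Printf("cpu%s ran %dns, waited %dns over %d timeslices\n",
			c.CPUNum, c.RunningNanoseconds, c.WaitingNanoseconds, c.RunTimeslices)
	}
}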


@ -15,13 +15,14 @@ package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
// CPUStat shows how much time the cpu spend in various stages.
@ -164,16 +165,15 @@ func (fs FS) NewStat() (Stat, error) {
// Stat returns information about current cpu/process statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) Stat() (Stat, error) {
f, err := os.Open(fs.proc.Path("stat"))
fileName := fs.proc.Path("stat")
data, err := util.ReadFileNoStat(fileName)
if err != nil {
return Stat{}, err
}
defer f.Close()
stat := Stat{}
scanner := bufio.NewScanner(f)
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@ -237,7 +237,7 @@ func (fs FS) Stat() (Stat, error) {
}
if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err)
}
return stat, nil

vendor/github.com/prometheus/procfs/vm.go generated vendored Normal file

@ -0,0 +1,210 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// The VM interface is described at
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except
// lowmem_reserve_ratio, which holds an array, and numa_zonelist_order
// (deprecated), which is a string.
type VM struct {
	AdminReserveKbytes        *int64   // /proc/sys/vm/admin_reserve_kbytes
	BlockDump                 *int64   // /proc/sys/vm/block_dump
	CompactUnevictableAllowed *int64   // /proc/sys/vm/compact_unevictable_allowed
	DirtyBackgroundBytes      *int64   // /proc/sys/vm/dirty_background_bytes
	DirtyBackgroundRatio      *int64   // /proc/sys/vm/dirty_background_ratio
	DirtyBytes                *int64   // /proc/sys/vm/dirty_bytes
	DirtyExpireCentisecs      *int64   // /proc/sys/vm/dirty_expire_centisecs
	DirtyRatio                *int64   // /proc/sys/vm/dirty_ratio
	DirtytimeExpireSeconds    *int64   // /proc/sys/vm/dirtytime_expire_seconds
	DirtyWritebackCentisecs   *int64   // /proc/sys/vm/dirty_writeback_centisecs
	DropCaches                *int64   // /proc/sys/vm/drop_caches
	ExtfragThreshold          *int64   // /proc/sys/vm/extfrag_threshold
	HugetlbShmGroup           *int64   // /proc/sys/vm/hugetlb_shm_group
	LaptopMode                *int64   // /proc/sys/vm/laptop_mode
	LegacyVaLayout            *int64   // /proc/sys/vm/legacy_va_layout
	LowmemReserveRatio        []*int64 // /proc/sys/vm/lowmem_reserve_ratio
	MaxMapCount               *int64   // /proc/sys/vm/max_map_count
	MemoryFailureEarlyKill    *int64   // /proc/sys/vm/memory_failure_early_kill
	MemoryFailureRecovery     *int64   // /proc/sys/vm/memory_failure_recovery
	MinFreeKbytes             *int64   // /proc/sys/vm/min_free_kbytes
	MinSlabRatio              *int64   // /proc/sys/vm/min_slab_ratio
	MinUnmappedRatio          *int64   // /proc/sys/vm/min_unmapped_ratio
	MmapMinAddr               *int64   // /proc/sys/vm/mmap_min_addr
	NrHugepages               *int64   // /proc/sys/vm/nr_hugepages
	NrHugepagesMempolicy      *int64   // /proc/sys/vm/nr_hugepages_mempolicy
	NrOvercommitHugepages     *int64   // /proc/sys/vm/nr_overcommit_hugepages
	NumaStat                  *int64   // /proc/sys/vm/numa_stat
	NumaZonelistOrder         string   // /proc/sys/vm/numa_zonelist_order
	OomDumpTasks              *int64   // /proc/sys/vm/oom_dump_tasks
	OomKillAllocatingTask     *int64   // /proc/sys/vm/oom_kill_allocating_task
	OvercommitKbytes          *int64   // /proc/sys/vm/overcommit_kbytes
	OvercommitMemory          *int64   // /proc/sys/vm/overcommit_memory
	OvercommitRatio           *int64   // /proc/sys/vm/overcommit_ratio
	PageCluster               *int64   // /proc/sys/vm/page-cluster
	PanicOnOom                *int64   // /proc/sys/vm/panic_on_oom
	PercpuPagelistFraction    *int64   // /proc/sys/vm/percpu_pagelist_fraction
	StatInterval              *int64   // /proc/sys/vm/stat_interval
	Swappiness                *int64   // /proc/sys/vm/swappiness
	UserReserveKbytes         *int64   // /proc/sys/vm/user_reserve_kbytes
	VfsCachePressure          *int64   // /proc/sys/vm/vfs_cache_pressure
	WatermarkBoostFactor      *int64   // /proc/sys/vm/watermark_boost_factor
	WatermarkScaleFactor      *int64   // /proc/sys/vm/watermark_scale_factor
	ZoneReclaimMode           *int64   // /proc/sys/vm/zone_reclaim_mode
}
// VM reads the VM statistics from the specified `proc` filesystem.
func (fs FS) VM() (*VM, error) {
	path := fs.proc.Path("sys/vm")
	file, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if !file.Mode().IsDir() {
		return nil, fmt.Errorf("%s is not a directory", path)
	}

	files, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, err
	}

	var vm VM
	for _, f := range files {
		if f.IsDir() {
			continue
		}

		name := filepath.Join(path, f.Name())
		// Ignore read errors: some files in /proc/sys/vm are write-only.
		value, err := util.SysReadFile(name)
		if err != nil {
			continue
		}
		vp := util.NewValueParser(value)
		switch f.Name() {
		case "admin_reserve_kbytes":
			vm.AdminReserveKbytes = vp.PInt64()
		case "block_dump":
			vm.BlockDump = vp.PInt64()
		case "compact_unevictable_allowed":
			vm.CompactUnevictableAllowed = vp.PInt64()
		case "dirty_background_bytes":
			vm.DirtyBackgroundBytes = vp.PInt64()
		case "dirty_background_ratio":
			vm.DirtyBackgroundRatio = vp.PInt64()
		case "dirty_bytes":
			vm.DirtyBytes = vp.PInt64()
		case "dirty_expire_centisecs":
			vm.DirtyExpireCentisecs = vp.PInt64()
		case "dirty_ratio":
			vm.DirtyRatio = vp.PInt64()
		case "dirtytime_expire_seconds":
			vm.DirtytimeExpireSeconds = vp.PInt64()
		case "dirty_writeback_centisecs":
			vm.DirtyWritebackCentisecs = vp.PInt64()
		case "drop_caches":
			vm.DropCaches = vp.PInt64()
		case "extfrag_threshold":
			vm.ExtfragThreshold = vp.PInt64()
		case "hugetlb_shm_group":
			vm.HugetlbShmGroup = vp.PInt64()
		case "laptop_mode":
			vm.LaptopMode = vp.PInt64()
		case "legacy_va_layout":
			vm.LegacyVaLayout = vp.PInt64()
		case "lowmem_reserve_ratio":
			stringSlice := strings.Fields(value)
			pint64Slice := make([]*int64, 0, len(stringSlice))
			for _, value := range stringSlice {
				vp := util.NewValueParser(value)
				pint64Slice = append(pint64Slice, vp.PInt64())
			}
			vm.LowmemReserveRatio = pint64Slice
		case "max_map_count":
			vm.MaxMapCount = vp.PInt64()
		case "memory_failure_early_kill":
			vm.MemoryFailureEarlyKill = vp.PInt64()
		case "memory_failure_recovery":
			vm.MemoryFailureRecovery = vp.PInt64()
		case "min_free_kbytes":
			vm.MinFreeKbytes = vp.PInt64()
		case "min_slab_ratio":
			vm.MinSlabRatio = vp.PInt64()
		case "min_unmapped_ratio":
			vm.MinUnmappedRatio = vp.PInt64()
		case "mmap_min_addr":
			vm.MmapMinAddr = vp.PInt64()
		case "nr_hugepages":
			vm.NrHugepages = vp.PInt64()
		case "nr_hugepages_mempolicy":
			vm.NrHugepagesMempolicy = vp.PInt64()
		case "nr_overcommit_hugepages":
			vm.NrOvercommitHugepages = vp.PInt64()
		case "numa_stat":
			vm.NumaStat = vp.PInt64()
		case "numa_zonelist_order":
			vm.NumaZonelistOrder = value
		case "oom_dump_tasks":
			vm.OomDumpTasks = vp.PInt64()
		case "oom_kill_allocating_task":
			vm.OomKillAllocatingTask = vp.PInt64()
		case "overcommit_kbytes":
			vm.OvercommitKbytes = vp.PInt64()
		case "overcommit_memory":
			vm.OvercommitMemory = vp.PInt64()
		case "overcommit_ratio":
			vm.OvercommitRatio = vp.PInt64()
		case "page-cluster":
			vm.PageCluster = vp.PInt64()
		case "panic_on_oom":
			vm.PanicOnOom = vp.PInt64()
		case "percpu_pagelist_fraction":
			vm.PercpuPagelistFraction = vp.PInt64()
		case "stat_interval":
			vm.StatInterval = vp.PInt64()
		case "swappiness":
			vm.Swappiness = vp.PInt64()
		case "user_reserve_kbytes":
			vm.UserReserveKbytes = vp.PInt64()
		case "vfs_cache_pressure":
			vm.VfsCachePressure = vp.PInt64()
		case "watermark_boost_factor":
			vm.WatermarkBoostFactor = vp.PInt64()
		case "watermark_scale_factor":
			vm.WatermarkScaleFactor = vp.PInt64()
		case "zone_reclaim_mode":
			vm.ZoneReclaimMode = vp.PInt64()
		}
		if err := vp.Err(); err != nil {
			return nil, err
		}
	}

	return &vm, nil
}
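A short usage sketch for the new API, assuming the vendored package is imported by its module path github.com/prometheus/procfs. Note that almost all fields are *int64 and are left nil when the corresponding file was absent or unreadable (for example, write-only files), so callers must nil-check before dereferencing:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	vm, err := fs.VM()
	if err != nil {
		panic(err)
	}

	// Pointer fields are nil when the setting could not be read,
	// so check before dereferencing.
	if vm.Swappiness != nil {
		fmt.Println("vm.swappiness =", *vm.Swappiness)
	}
	for i, r := range vm.LowmemReserveRatio {
		if r != nil {
			fmt.Printf("lowmem_reserve_ratio[%d] = %d\n", i, *r)
		}
	}
	fmt.Println("numa_zonelist_order =", vm.NumaZonelistOrder)
}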

Some files were not shown because too many files have changed in this diff.