Mirror of https://github.com/status-im/status-go.git, synced 2025-02-19 18:28:18 +00:00
Upgrade go-ens
go-ens needs to be updated to be compatible with the latest geth version.
This commit is contained in:
parent
188eabef0b
commit
2f539d3bd2
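For context, a minimal sketch of resolving an ENS name with the upgraded go-ens v3 API against a geth-backed client. The RPC endpoint and the resolved name are placeholders, and the exact call signatures should be checked against the go-ens v3 documentation rather than taken from this commit:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
	ens "github.com/wealdtech/go-ens/v3"
)

func main() {
	// Placeholder endpoint; any synced Ethereum JSON-RPC node works here.
	client, err := ethclient.Dial("https://mainnet.infura.io/v3/YOUR-PROJECT-ID")
	if err != nil {
		log.Fatal(err)
	}
	// Resolve an ENS name to its address via the go-ens v3 API.
	address, err := ens.Resolve(client, "status.eth")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(address.Hex())
}
```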
@@ -1,13 +0,0 @@
|
||||
module github.com/status-im/status-go/extkeys
|
||||
|
||||
go 1.13
|
||||
|
||||
replace github.com/ethereum/go-ethereum v1.9.5 => github.com/status-im/go-ethereum v1.9.5-status.7
|
||||
|
||||
require (
|
||||
github.com/btcsuite/btcd v0.20.1-beta
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d
|
||||
github.com/ethereum/go-ethereum v1.9.5
|
||||
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c
|
||||
golang.org/x/text v0.3.2
|
||||
)
|
102
extkeys/go.sum
@@ -1,102 +0,0 @@
|
||||
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e/go.mod h1:x2mtS6O3mnMEZOJp7d7oldh8IvatBrMfReiyQ+cKgKY=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/allegro/bigcache v0.0.0-20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20170201225849-2268707a8f08/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/docker/docker v0.0.0-20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elastic/gosigar v0.0.0-20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
|
||||
github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gizak/termui v0.0.0-20170117222342-991cd3d38091/go.mod h1:PkJoWUt/zacQKysNfQtcw1RW+eK2SxkieVBtl+4ovLA=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-stack/stack v1.5.4/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
|
||||
github.com/influxdata/influxdb v0.0.0-20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
|
||||
github.com/jackpal/go-nat-pmp v0.0.0-20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/julienschmidt/httprouter v0.0.0-20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/karalabe/hid v0.0.0-20181128192157-d815e0c1a2e2/go.mod h1:YvbcH+3Wo6XPs9nkgTY3u19KXLauXW+J5nB7hEHuX0A=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/maruel/panicparse v0.0.0-20160720141634-ad661195ed0e/go.mod h1:nty42YY5QByNC5MM7q/nj938VbgPU7avs45z6NClpxI=
|
||||
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.0-20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/naoina/toml v0.0.0-20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
|
||||
github.com/nsf/termbox-go v0.0.0-20170211012700-3540b76b9c77/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170128050532-febf2d34b54a/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opentracing/opentracing-go v0.0.0-20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
|
||||
github.com/peterh/liner v0.0.0-20170902204657-a37ad3984311/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
|
||||
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/prometheus v0.0.0-20170814170113-3101606756c5/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY=
|
||||
github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
|
||||
github.com/status-im/go-ethereum v1.9.5-status.7 h1:DKH1GiF52LwaZaw6YDBliFEgm/JDsbIT+hn7ph6X94Q=
|
||||
github.com/status-im/go-ethereum v1.9.5-status.7/go.mod h1:YyH5DKB6+z+Vaya7eIm67pnuPZ1oiUMbbsZW41ktN0g=
|
||||
github.com/status-im/status-go/extkeys v1.0.0/go.mod h1:GdqJbrcpkNm5ZsSCpp+PdMxnXx+OcRBdm3PI0rs1FpU=
|
||||
github.com/stretchr/testify v0.0.0-20170809224252-890a5c3458b4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/syndtr/goleveldb v0.0.0-20181128100959-b001fa50d6b2/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/uber/jaeger-client-go v0.0.0-20180607151842-f7e0d4744fa6/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v0.0.0-20180615202729-a51202d6f4a7/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191119213627-4f8c1d86b1ba h1:9bFeDpN3gTqNanMVqNcoR/pJQuP5uroC3t1D7eXozTE=
|
||||
golang.org/x/crypto v0.0.0-20191119213627-4f8c1d86b1ba/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c h1:/nJuwDLoL/zrqY6gf57vxC+Pi+pZ8bfhpPkicO5H7W4=
|
||||
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405 h1:829vOVxxusYHC+IqBtkX5mbKtsY9fheQiQn0MZRVLfQ=
|
||||
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20180302121509-abf0ba0be5d5/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
32
go.mod
@@ -2,7 +2,7 @@ module github.com/status-im/status-go
|
||||
|
||||
go 1.13
|
||||
|
||||
replace github.com/ethereum/go-ethereum v1.9.5 => github.com/status-im/go-ethereum v1.9.5-status.12
|
||||
replace github.com/ethereum/go-ethereum => /home/cammellos/.go/src/github.com/status-im/go-ethereum
|
||||
|
||||
replace github.com/Sirupsen/logrus v1.4.2 => github.com/sirupsen/logrus v1.4.2
|
||||
|
||||
@@ -13,21 +13,20 @@ replace github.com/nfnt/resize => github.com/status-im/resize v0.0.0-20201215164
|
||||
require (
|
||||
github.com/PuerkitoBio/goquery v1.6.0 // indirect
|
||||
github.com/beevik/ntp v0.2.0
|
||||
github.com/btcsuite/btcd v0.22.0-beta
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
|
||||
github.com/cenkalti/backoff/v3 v3.2.2
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/deckarep/golang-set v1.7.1
|
||||
github.com/ethereum/go-ethereum v1.9.5
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
|
||||
github.com/ethereum/go-ethereum v1.10.4
|
||||
github.com/go-playground/universal-translator v0.17.0 // indirect
|
||||
github.com/golang-migrate/migrate/v4 v4.8.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
|
||||
github.com/golang/mock v1.4.1
|
||||
github.com/golang/protobuf v1.4.1
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/golang/protobuf v1.5.2
|
||||
github.com/google/uuid v1.2.0
|
||||
github.com/gorilla/mux v1.7.3 // indirect
|
||||
github.com/ipfs/go-log v1.0.4
|
||||
github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a
|
||||
github.com/karalabe/usb v0.0.0-20191104083709-911d15fe12a9 // indirect
|
||||
github.com/keighl/metabolize v0.0.0-20150915210303-97ab655d4034
|
||||
github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f
|
||||
github.com/leodido/go-urn v1.2.0 // indirect
|
||||
@@ -37,14 +36,12 @@ require (
|
||||
github.com/mat/besticon v3.12.0+incompatible
|
||||
github.com/mattn/go-colorable v0.1.4 // indirect
|
||||
github.com/mattn/go-isatty v0.0.10 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.6 // indirect
|
||||
github.com/multiformats/go-multiaddr v0.3.1
|
||||
github.com/multiformats/go-multiaddr v0.3.2
|
||||
github.com/multiformats/go-multibase v0.0.3
|
||||
github.com/multiformats/go-varint v0.0.6
|
||||
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f
|
||||
github.com/nfnt/resize v0.0.0-00010101000000-000000000000
|
||||
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
|
||||
github.com/olekukonko/tablewriter v0.0.2 // indirect
|
||||
github.com/oliamb/cutter v0.2.2
|
||||
github.com/pborman/uuid v1.2.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
@@ -55,25 +52,20 @@ require (
|
||||
github.com/status-im/doubleratchet v3.0.0+incompatible
|
||||
github.com/status-im/go-waku v0.0.0-20210624095504-4133155590da
|
||||
github.com/status-im/go-wakurelay-pubsub v0.4.2
|
||||
github.com/status-im/keycard-go v0.0.0-20200107115650-f38e9a19958e // indirect
|
||||
github.com/status-im/markdown v0.0.0-20201022101546-c0cbdd5763bf
|
||||
github.com/status-im/migrate/v4 v4.6.2-status.2
|
||||
github.com/status-im/rendezvous v1.3.2
|
||||
github.com/status-im/status-go/extkeys v1.1.2
|
||||
github.com/status-im/tcp-shaker v0.0.0-20191114194237-215893130501
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954
|
||||
github.com/tsenart/tb v0.0.0-20181025101425-0d2499c8b6e9
|
||||
github.com/vacp2p/mvds v0.0.24-0.20201124060106-26d8e94130d8
|
||||
github.com/wealdtech/go-ens/v3 v3.3.0
|
||||
github.com/wealdtech/go-ens/v3 v3.5.0
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
go.uber.org/zap v1.15.0
|
||||
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
|
||||
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/protobuf v1.26.0-rc.1 // indirect
|
||||
golang.org/x/text v0.3.6
|
||||
gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
|
||||
gopkg.in/go-playground/validator.v9 v9.31.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||
|
20
vendor/github.com/StackExchange/wmi/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Stack Exchange
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
6
vendor/github.com/StackExchange/wmi/README.md
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
wmi
|
||||
===
|
||||
|
||||
Package wmi provides a WQL interface to Windows WMI.
|
||||
|
||||
Note: It interfaces with WMI on the local machine, therefore it only runs on Windows.
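For quick reference, the usage example from the package documentation in wmi.go (further down in this diff), with imports added so it compiles as a standalone program; it only builds and runs on Windows:

```go
package main

import (
	"log"

	"github.com/StackExchange/wmi"
)

// Win32_Process mirrors the WMI class of the same name; only the fields
// you want selected need to be declared.
type Win32_Process struct {
	Name string
}

func main() {
	var dst []Win32_Process
	// CreateQuery builds "SELECT Name FROM Win32_Process" from the struct type.
	q := wmi.CreateQuery(&dst, "")
	if err := wmi.Query(q, &dst); err != nil {
		log.Fatal(err)
	}
	for i, v := range dst {
		log.Println(i, v.Name)
	}
}
```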
|
260
vendor/github.com/StackExchange/wmi/swbemservices.go
generated
vendored
Normal file
@@ -0,0 +1,260 @@
|
||||
// +build windows
|
||||
|
||||
package wmi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/go-ole/go-ole"
|
||||
"github.com/go-ole/go-ole/oleutil"
|
||||
)
|
||||
|
||||
// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx
|
||||
type SWbemServices struct {
|
||||
//TODO: track namespace. Not sure if we can reconnect to a different namespace using the same instance
|
||||
cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method
|
||||
sWbemLocatorIUnknown *ole.IUnknown
|
||||
sWbemLocatorIDispatch *ole.IDispatch
|
||||
queries chan *queryRequest
|
||||
closeError chan error
|
||||
lQueryorClose sync.Mutex
|
||||
}
|
||||
|
||||
type queryRequest struct {
|
||||
query string
|
||||
dst interface{}
|
||||
args []interface{}
|
||||
finished chan error
|
||||
}
|
||||
|
||||
// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI
|
||||
func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) {
|
||||
//fmt.Println("InitializeSWbemServices: Starting")
|
||||
//TODO: implement connectServerArgs as optional argument for init with connectServer call
|
||||
s := new(SWbemServices)
|
||||
s.cWMIClient = c
|
||||
s.queries = make(chan *queryRequest)
|
||||
initError := make(chan error)
|
||||
go s.process(initError)
|
||||
|
||||
err, ok := <-initError
|
||||
if ok {
|
||||
return nil, err //Send error to caller
|
||||
}
|
||||
//fmt.Println("InitializeSWbemServices: Finished")
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Close will clear and release all of the SWbemServices resources
|
||||
func (s *SWbemServices) Close() error {
|
||||
s.lQueryorClose.Lock()
|
||||
if s == nil || s.sWbemLocatorIDispatch == nil {
|
||||
s.lQueryorClose.Unlock()
|
||||
return fmt.Errorf("SWbemServices is not Initialized")
|
||||
}
|
||||
if s.queries == nil {
|
||||
s.lQueryorClose.Unlock()
|
||||
return fmt.Errorf("SWbemServices has been closed")
|
||||
}
|
||||
//fmt.Println("Close: sending close request")
|
||||
var result error
|
||||
ce := make(chan error)
|
||||
s.closeError = ce //Race condition if multiple callers to close. May need to lock here
|
||||
close(s.queries) //Tell background to shut things down
|
||||
s.lQueryorClose.Unlock()
|
||||
err, ok := <-ce
|
||||
if ok {
|
||||
result = err
|
||||
}
|
||||
//fmt.Println("Close: finished")
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *SWbemServices) process(initError chan error) {
|
||||
//fmt.Println("process: starting background thread initialization")
|
||||
//All OLE/WMI calls must happen on the same initialized thread, so lock this goroutine
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
|
||||
if err != nil {
|
||||
oleCode := err.(*ole.OleError).Code()
|
||||
if oleCode != ole.S_OK && oleCode != S_FALSE {
|
||||
initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
defer ole.CoUninitialize()
|
||||
|
||||
unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
|
||||
if err != nil {
|
||||
initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err)
|
||||
return
|
||||
} else if unknown == nil {
|
||||
initError <- ErrNilCreateObject
|
||||
return
|
||||
}
|
||||
defer unknown.Release()
|
||||
s.sWbemLocatorIUnknown = unknown
|
||||
|
||||
dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch)
|
||||
if err != nil {
|
||||
initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err)
|
||||
return
|
||||
}
|
||||
defer dispatch.Release()
|
||||
s.sWbemLocatorIDispatch = dispatch
|
||||
|
||||
// we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs
|
||||
//fmt.Println("process: initialized. closing initError")
|
||||
close(initError)
|
||||
//fmt.Println("process: waiting for queries")
|
||||
for q := range s.queries {
|
||||
//fmt.Printf("process: new query: len(query)=%d\n", len(q.query))
|
||||
errQuery := s.queryBackground(q)
|
||||
//fmt.Println("process: s.queryBackground finished")
|
||||
if errQuery != nil {
|
||||
q.finished <- errQuery
|
||||
}
|
||||
close(q.finished)
|
||||
}
|
||||
//fmt.Println("process: queries channel closed")
|
||||
s.queries = nil //set channel to nil so we know it is closed
|
||||
//TODO: I think the Release/Clear calls can panic if things are in a bad state.
|
||||
//TODO: May need to recover from panics and send error to method caller instead.
|
||||
close(s.closeError)
|
||||
}
|
||||
|
||||
// Query runs the WQL query using a SWbemServices instance and appends the values to dst.
|
||||
//
|
||||
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
|
||||
// the query must have the same name in dst. Supported types are all signed and
|
||||
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
|
||||
// Array types are not supported.
|
||||
//
|
||||
// By default, the local machine and default namespace are used. These can be
|
||||
// changed using connectServerArgs. See
|
||||
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
|
||||
func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
|
||||
s.lQueryorClose.Lock()
|
||||
if s == nil || s.sWbemLocatorIDispatch == nil {
|
||||
s.lQueryorClose.Unlock()
|
||||
return fmt.Errorf("SWbemServices is not Initialized")
|
||||
}
|
||||
if s.queries == nil {
|
||||
s.lQueryorClose.Unlock()
|
||||
return fmt.Errorf("SWbemServices has been closed")
|
||||
}
|
||||
|
||||
//fmt.Println("Query: Sending query request")
|
||||
qr := queryRequest{
|
||||
query: query,
|
||||
dst: dst,
|
||||
args: connectServerArgs,
|
||||
finished: make(chan error),
|
||||
}
|
||||
s.queries <- &qr
|
||||
s.lQueryorClose.Unlock()
|
||||
err, ok := <-qr.finished
|
||||
if ok {
|
||||
//fmt.Println("Query: Finished with error")
|
||||
return err //Send error to caller
|
||||
}
|
||||
//fmt.Println("Query: Finished")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SWbemServices) queryBackground(q *queryRequest) error {
|
||||
if s == nil || s.sWbemLocatorIDispatch == nil {
|
||||
return fmt.Errorf("SWbemServices is not Initialized")
|
||||
}
|
||||
wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart
|
||||
//fmt.Println("queryBackground: Starting")
|
||||
|
||||
dv := reflect.ValueOf(q.dst)
|
||||
if dv.Kind() != reflect.Ptr || dv.IsNil() {
|
||||
return ErrInvalidEntityType
|
||||
}
|
||||
dv = dv.Elem()
|
||||
mat, elemType := checkMultiArg(dv)
|
||||
if mat == multiArgTypeInvalid {
|
||||
return ErrInvalidEntityType
|
||||
}
|
||||
|
||||
// service is a SWbemServices
|
||||
serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
service := serviceRaw.ToIDispatch()
|
||||
defer serviceRaw.Clear()
|
||||
|
||||
// result is a SWBemObjectSet
|
||||
resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result := resultRaw.ToIDispatch()
|
||||
defer resultRaw.Clear()
|
||||
|
||||
count, err := oleInt64(result, "Count")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enumProperty, err := result.GetProperty("_NewEnum")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer enumProperty.Clear()
|
||||
|
||||
enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if enum == nil {
|
||||
return fmt.Errorf("can't get IEnumVARIANT, enum is nil")
|
||||
}
|
||||
defer enum.Release()
|
||||
|
||||
// Initialize a slice with Count capacity
|
||||
dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))
|
||||
|
||||
var errFieldMismatch error
|
||||
for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := func() error {
|
||||
// item is a SWbemObject, but really a Win32_Process
|
||||
item := itemRaw.ToIDispatch()
|
||||
defer item.Release()
|
||||
|
||||
ev := reflect.New(elemType)
|
||||
if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil {
|
||||
if _, ok := err.(*ErrFieldMismatch); ok {
|
||||
// We continue loading entities even in the face of field mismatch errors.
|
||||
// If we encounter any other error, that other error is returned. Otherwise,
|
||||
// an ErrFieldMismatch is returned.
|
||||
errFieldMismatch = err
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if mat != multiArgTypeStructPtr {
|
||||
ev = ev.Elem()
|
||||
}
|
||||
dv.Set(reflect.Append(dv, ev))
|
||||
return nil
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
//fmt.Println("queryBackground: Finished")
|
||||
return errFieldMismatch
|
||||
}
|
587
vendor/github.com/StackExchange/wmi/wmi.go
generated
vendored
Normal file
@@ -0,0 +1,587 @@
|
||||
// +build windows
|
||||
|
||||
/*
|
||||
Package wmi provides a WQL interface for WMI on Windows.
|
||||
|
||||
Example code to print names of running processes:
|
||||
|
||||
type Win32_Process struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
func main() {
|
||||
var dst []Win32_Process
|
||||
q := wmi.CreateQuery(&dst, "")
|
||||
err := wmi.Query(q, &dst)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for i, v := range dst {
|
||||
println(i, v.Name)
|
||||
}
|
||||
}
|
||||
|
||||
*/
|
||||
package wmi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-ole/go-ole"
|
||||
"github.com/go-ole/go-ole/oleutil"
|
||||
)
|
||||
|
||||
var l = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
var (
|
||||
ErrInvalidEntityType = errors.New("wmi: invalid entity type")
|
||||
// ErrNilCreateObject is the error returned if CreateObject returns nil even
|
||||
// if the error was nil.
|
||||
ErrNilCreateObject = errors.New("wmi: create object returned nil")
|
||||
lock sync.Mutex
|
||||
)
|
||||
|
||||
// S_FALSE is returned by CoInitializeEx if it was already called on this thread.
|
||||
const S_FALSE = 0x00000001
|
||||
|
||||
// QueryNamespace invokes Query with the given namespace on the local machine.
|
||||
func QueryNamespace(query string, dst interface{}, namespace string) error {
|
||||
return Query(query, dst, nil, namespace)
|
||||
}
|
||||
|
||||
// Query runs the WQL query and appends the values to dst.
|
||||
//
|
||||
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
|
||||
// the query must have the same name in dst. Supported types are all signed and
|
||||
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
|
||||
// Array types are not supported.
|
||||
//
|
||||
// By default, the local machine and default namespace are used. These can be
|
||||
// changed using connectServerArgs. See
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver
|
||||
// for details.
|
||||
//
|
||||
// Query is a wrapper around DefaultClient.Query.
|
||||
func Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
|
||||
if DefaultClient.SWbemServicesClient == nil {
|
||||
return DefaultClient.Query(query, dst, connectServerArgs...)
|
||||
}
|
||||
return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...)
|
||||
}
|
||||
|
||||
// CallMethod calls a method named methodName on an instance of the class named
|
||||
// className, with the given params.
|
||||
//
|
||||
// CallMethod is a wrapper around DefaultClient.CallMethod.
|
||||
func CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) {
|
||||
return DefaultClient.CallMethod(connectServerArgs, className, methodName, params)
|
||||
}
|
||||
|
||||
// A Client is an WMI query client.
|
||||
//
|
||||
// Its zero value (DefaultClient) is a usable client.
|
||||
type Client struct {
|
||||
// NonePtrZero specifies if nil values for fields which aren't pointers
|
||||
// should be returned as the field type's zero value.
|
||||
//
|
||||
// Setting this to true allows structs without pointer fields to be used
// without the risk of failure should a nil value be returned from WMI.
|
||||
NonePtrZero bool
|
||||
|
||||
// PtrNil specifies if nil values for pointer fields should be returned
|
||||
// as nil.
|
||||
//
|
||||
// Setting this to true will set pointer fields to nil where WMI
|
||||
// returned nil, otherwise the type's zero value will be returned.
|
||||
PtrNil bool
|
||||
|
||||
// AllowMissingFields specifies that struct fields not present in the
|
||||
// query result should not result in an error.
|
||||
//
|
||||
// Setting this to true allows custom queries to be used with full
|
||||
// struct definitions instead of having to define multiple structs.
|
||||
AllowMissingFields bool
|
||||
|
||||
// SWbemServicesClient is an optional SWbemServices object that can be
// initialized and then reused across multiple queries. If it is nil
// then the method will initialize a new temporary client each time.
|
||||
SWbemServicesClient *SWbemServices
|
||||
}
|
||||
|
||||
// DefaultClient is the default Client and is used by Query, QueryNamespace, and CallMethod.
|
||||
var DefaultClient = &Client{}
|
||||
|
||||
// coinitService coinitializes WMI service. If no error is returned, a cleanup function
|
||||
// is returned which must be executed (usually deferred) to clean up allocated resources.
|
||||
func (c *Client) coinitService(connectServerArgs ...interface{}) (*ole.IDispatch, func(), error) {
|
||||
var unknown *ole.IUnknown
|
||||
var wmi *ole.IDispatch
|
||||
var serviceRaw *ole.VARIANT
|
||||
|
||||
// be sure teardown happens in the reverse
|
||||
// order from that which they were created
|
||||
deferFn := func() {
|
||||
if serviceRaw != nil {
|
||||
serviceRaw.Clear()
|
||||
}
|
||||
if wmi != nil {
|
||||
wmi.Release()
|
||||
}
|
||||
if unknown != nil {
|
||||
unknown.Release()
|
||||
}
|
||||
ole.CoUninitialize()
|
||||
}
|
||||
|
||||
// if we error'ed here, clean up immediately
|
||||
var err error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
deferFn()
|
||||
}
|
||||
}()
|
||||
|
||||
err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
|
||||
if err != nil {
|
||||
oleCode := err.(*ole.OleError).Code()
|
||||
if oleCode != ole.S_OK && oleCode != S_FALSE {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
unknown, err = oleutil.CreateObject("WbemScripting.SWbemLocator")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
} else if unknown == nil {
|
||||
return nil, nil, ErrNilCreateObject
|
||||
}
|
||||
|
||||
wmi, err = unknown.QueryInterface(ole.IID_IDispatch)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// service is a SWbemServices
|
||||
serviceRaw, err = oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return serviceRaw.ToIDispatch(), deferFn, nil
|
||||
}
|
||||
|
||||
// CallMethod calls a WMI method named methodName on an instance
|
||||
// of the class named className. It passes in the arguments given
|
||||
// in params. Use connectServerArgs to customize the machine and
|
||||
// namespace; by default, the local machine and default namespace
|
||||
// are used. See
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver
|
||||
// for details.
|
||||
func (c *Client) CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) {
|
||||
service, cleanup, err := c.coinitService(connectServerArgs...)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("coinit: %v", err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
// Get class
|
||||
classRaw, err := oleutil.CallMethod(service, "Get", className)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("CallMethod Get class %s: %v", className, err)
|
||||
}
|
||||
class := classRaw.ToIDispatch()
|
||||
defer classRaw.Clear()
|
||||
|
||||
// Run method
|
||||
resultRaw, err := oleutil.CallMethod(class, methodName, params...)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("CallMethod %s.%s: %v", className, methodName, err)
|
||||
}
|
||||
resultInt, ok := resultRaw.Value().(int32)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("return value was not an int32: %v (%T)", resultRaw, resultRaw)
|
||||
}
|
||||
|
||||
return resultInt, nil
|
||||
}
|
||||
|
||||
// Query runs the WQL query and appends the values to dst.
|
||||
//
|
||||
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
|
||||
// the query must have the same name in dst. Supported types are all signed and
|
||||
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
|
||||
// Array types are not supported.
|
||||
//
|
||||
// By default, the local machine and default namespace are used. These can be
|
||||
// changed using connectServerArgs. See
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver
|
||||
// for details.
|
||||
func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
|
||||
dv := reflect.ValueOf(dst)
|
||||
if dv.Kind() != reflect.Ptr || dv.IsNil() {
|
||||
return ErrInvalidEntityType
|
||||
}
|
||||
dv = dv.Elem()
|
||||
mat, elemType := checkMultiArg(dv)
|
||||
if mat == multiArgTypeInvalid {
|
||||
return ErrInvalidEntityType
|
||||
}
|
||||
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
service, cleanup, err := c.coinitService(connectServerArgs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
// result is a SWBemObjectSet
|
||||
resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result := resultRaw.ToIDispatch()
|
||||
defer resultRaw.Clear()
|
||||
|
||||
count, err := oleInt64(result, "Count")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enumProperty, err := result.GetProperty("_NewEnum")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer enumProperty.Clear()
|
||||
|
||||
enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if enum == nil {
|
||||
return fmt.Errorf("can't get IEnumVARIANT, enum is nil")
|
||||
}
|
||||
defer enum.Release()
|
||||
|
||||
// Initialize a slice with Count capacity
|
||||
dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))
|
||||
|
||||
var errFieldMismatch error
|
||||
for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := func() error {
|
||||
// item is a SWbemObject, but really a Win32_Process
|
||||
item := itemRaw.ToIDispatch()
|
||||
defer item.Release()
|
||||
|
||||
ev := reflect.New(elemType)
|
||||
if err = c.loadEntity(ev.Interface(), item); err != nil {
|
||||
if _, ok := err.(*ErrFieldMismatch); ok {
|
||||
// We continue loading entities even in the face of field mismatch errors.
|
||||
// If we encounter any other error, that other error is returned. Otherwise,
|
||||
// an ErrFieldMismatch is returned.
|
||||
errFieldMismatch = err
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if mat != multiArgTypeStructPtr {
|
||||
ev = ev.Elem()
|
||||
}
|
||||
dv.Set(reflect.Append(dv, ev))
|
||||
return nil
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return errFieldMismatch
|
||||
}
|
||||
|
||||
// ErrFieldMismatch is returned when a field is to be loaded into a different
|
||||
// type than the one it was stored from, or when a field is missing or
|
||||
// unexported in the destination struct.
|
||||
// StructType is the type of the struct pointed to by the destination argument.
|
||||
type ErrFieldMismatch struct {
|
||||
StructType reflect.Type
|
||||
FieldName string
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e *ErrFieldMismatch) Error() string {
|
||||
return fmt.Sprintf("wmi: cannot load field %q into a %q: %s",
|
||||
e.FieldName, e.StructType, e.Reason)
|
||||
}
|
||||
|
||||
var timeType = reflect.TypeOf(time.Time{})
|
||||
|
||||
// loadEntity loads a SWbemObject into a struct pointer.
|
||||
func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) {
|
||||
v := reflect.ValueOf(dst).Elem()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := v.Field(i)
|
||||
of := f
|
||||
isPtr := f.Kind() == reflect.Ptr
|
||||
if isPtr {
|
||||
ptr := reflect.New(f.Type().Elem())
|
||||
f.Set(ptr)
|
||||
f = f.Elem()
|
||||
}
|
||||
n := v.Type().Field(i).Name
|
||||
if !f.CanSet() {
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "CanSet() is false",
|
||||
}
|
||||
}
|
||||
prop, err := oleutil.GetProperty(src, n)
|
||||
if err != nil {
|
||||
if !c.AllowMissingFields {
|
||||
errFieldMismatch = &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "no such struct field",
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
defer prop.Clear()
|
||||
|
||||
if prop.VT == 0x1 { //VT_NULL
|
||||
continue
|
||||
}
|
||||
|
||||
switch val := prop.Value().(type) {
|
||||
case int8, int16, int32, int64, int:
|
||||
v := reflect.ValueOf(val).Int()
|
||||
switch f.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
f.SetInt(v)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
f.SetUint(uint64(v))
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "not an integer class",
|
||||
}
|
||||
}
|
||||
case uint8, uint16, uint32, uint64:
|
||||
v := reflect.ValueOf(val).Uint()
|
||||
switch f.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
f.SetInt(int64(v))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
f.SetUint(v)
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "not an integer class",
|
||||
}
|
||||
}
|
||||
case string:
|
||||
switch f.Kind() {
|
||||
case reflect.String:
|
||||
f.SetString(val)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
iv, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.SetInt(iv)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
uv, err := strconv.ParseUint(val, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.SetUint(uv)
|
||||
case reflect.Struct:
|
||||
switch f.Type() {
|
||||
case timeType:
|
||||
if len(val) == 25 {
|
||||
mins, err := strconv.Atoi(val[22:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60)
|
||||
}
|
||||
t, err := time.Parse("20060102150405.000000-0700", val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Set(reflect.ValueOf(t))
|
||||
}
|
||||
}
|
||||
case bool:
|
||||
switch f.Kind() {
|
||||
case reflect.Bool:
|
||||
f.SetBool(val)
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "not a bool",
|
||||
}
|
||||
}
|
||||
case float32:
|
||||
switch f.Kind() {
|
||||
case reflect.Float32:
|
||||
f.SetFloat(float64(val))
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "not a Float32",
|
||||
}
|
||||
}
|
||||
default:
|
||||
if f.Kind() == reflect.Slice {
|
||||
switch f.Type().Elem().Kind() {
|
||||
case reflect.String:
|
||||
safeArray := prop.ToArray()
|
||||
if safeArray != nil {
|
||||
arr := safeArray.ToValueArray()
|
||||
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
|
||||
for i, v := range arr {
|
||||
s := fArr.Index(i)
|
||||
s.SetString(v.(string))
|
||||
}
|
||||
f.Set(fArr)
|
||||
}
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
safeArray := prop.ToArray()
|
||||
if safeArray != nil {
|
||||
arr := safeArray.ToValueArray()
|
||||
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
|
||||
for i, v := range arr {
|
||||
s := fArr.Index(i)
|
||||
s.SetUint(reflect.ValueOf(v).Uint())
|
||||
}
|
||||
f.Set(fArr)
|
||||
}
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
safeArray := prop.ToArray()
|
||||
if safeArray != nil {
|
||||
arr := safeArray.ToValueArray()
|
||||
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
|
||||
for i, v := range arr {
|
||||
s := fArr.Index(i)
|
||||
s.SetInt(reflect.ValueOf(v).Int())
|
||||
}
|
||||
f.Set(fArr)
|
||||
}
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: fmt.Sprintf("unsupported slice type (%T)", val),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
typeof := reflect.TypeOf(val)
|
||||
if typeof == nil && (isPtr || c.NonePtrZero) {
|
||||
if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
|
||||
of.Set(reflect.Zero(of.Type()))
|
||||
}
|
||||
break
|
||||
}
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: fmt.Sprintf("unsupported type (%T)", val),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return errFieldMismatch
|
||||
}
|
||||
|
||||
type multiArgType int
|
||||
|
||||
const (
|
||||
multiArgTypeInvalid multiArgType = iota
|
||||
multiArgTypeStruct
|
||||
multiArgTypeStructPtr
|
||||
)
|
||||
|
||||
// checkMultiArg checks that v has type []S, []*S for some struct type S.
|
||||
//
|
||||
// It returns what category the slice's elements are, and the reflect.Type
|
||||
// that represents S.
|
||||
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
|
||||
if v.Kind() != reflect.Slice {
|
||||
return multiArgTypeInvalid, nil
|
||||
}
|
||||
elemType = v.Type().Elem()
|
||||
switch elemType.Kind() {
|
||||
case reflect.Struct:
|
||||
return multiArgTypeStruct, elemType
|
||||
case reflect.Ptr:
|
||||
elemType = elemType.Elem()
|
||||
if elemType.Kind() == reflect.Struct {
|
||||
return multiArgTypeStructPtr, elemType
|
||||
}
|
||||
}
|
||||
return multiArgTypeInvalid, nil
|
||||
}
|
||||
|
||||
func oleInt64(item *ole.IDispatch, prop string) (int64, error) {
|
||||
v, err := oleutil.GetProperty(item, prop)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer v.Clear()
|
||||
|
||||
i := int64(v.Val)
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// CreateQuery returns a WQL query string that queries all columns of src. where
|
||||
// is an optional string that is appended to the query, to be used with WHERE
|
||||
// clauses. In such a case, the "WHERE" string should appear at the beginning.
|
||||
// The wmi class is obtained by the name of the type. You can pass an optional
// class through the variadic class parameter, which is useful for anonymous
// structs.
|
||||
func CreateQuery(src interface{}, where string, class ...string) string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("SELECT ")
|
||||
s := reflect.Indirect(reflect.ValueOf(src))
|
||||
t := s.Type()
|
||||
if s.Kind() == reflect.Slice {
|
||||
t = t.Elem()
|
||||
}
|
||||
if t.Kind() != reflect.Struct {
|
||||
return ""
|
||||
}
|
||||
var fields []string
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
fields = append(fields, t.Field(i).Name)
|
||||
}
|
||||
b.WriteString(strings.Join(fields, ", "))
|
||||
b.WriteString(" FROM ")
|
||||
if len(class) > 0 {
|
||||
b.WriteString(class[0])
|
||||
} else {
|
||||
b.WriteString(t.Name())
|
||||
}
|
||||
b.WriteString(" " + where)
|
||||
return b.String()
|
||||
}
|
22
vendor/github.com/VictoriaMetrics/fastcache/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2018 VictoriaMetrics
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
116
vendor/github.com/VictoriaMetrics/fastcache/README.md
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
[](https://github.com/VictoriaMetrics/fastcache/actions)
|
||||
[](http://godoc.org/github.com/VictoriaMetrics/fastcache)
|
||||
[](https://goreportcard.com/report/github.com/VictoriaMetrics/fastcache)
|
||||
[](https://codecov.io/gh/VictoriaMetrics/fastcache)
|
||||
|
||||
# fastcache - fast thread-safe in-memory cache for a big number of entries in Go
|
||||
|
||||
### Features
|
||||
|
||||
* Fast. Performance scales on multi-core CPUs. See benchmark results below.
|
||||
* Thread-safe. Concurrent goroutines may read and write into a single
|
||||
cache instance.
|
||||
* fastcache is designed for storing a big number of entries without
|
||||
[GC overhead](https://syslog.ravelin.com/further-dangers-of-large-heaps-in-go-7a267b57d487).
|
||||
* Fastcache automatically evicts old entries when reaching the maximum cache size
|
||||
set on its creation.
|
||||
* [Simple API](http://godoc.org/github.com/VictoriaMetrics/fastcache).
|
||||
* Simple source code.
|
||||
* Cache may be [saved to file](https://godoc.org/github.com/VictoriaMetrics/fastcache#Cache.SaveToFile)
|
||||
and [loaded from file](https://godoc.org/github.com/VictoriaMetrics/fastcache#LoadFromFile).
|
||||
* Works on [Google AppEngine](https://cloud.google.com/appengine/docs/go/).
|
||||
|
||||
|
||||
### Benchmarks
|
||||
|
||||
`Fastcache` performance is compared with [BigCache](https://github.com/allegro/bigcache), standard Go map
|
||||
and [sync.Map](https://golang.org/pkg/sync/#Map).
|
||||
|
||||
```
|
||||
GOMAXPROCS=4 go test github.com/VictoriaMetrics/fastcache -bench='Set|Get' -benchtime=10s
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/VictoriaMetrics/fastcache
|
||||
BenchmarkBigCacheSet-4 2000 10566656 ns/op 6.20 MB/s 4660369 B/op 6 allocs/op
|
||||
BenchmarkBigCacheGet-4 2000 6902694 ns/op 9.49 MB/s 684169 B/op 131076 allocs/op
|
||||
BenchmarkBigCacheSetGet-4 1000 17579118 ns/op 7.46 MB/s 5046744 B/op 131083 allocs/op
|
||||
BenchmarkCacheSet-4 5000 3808874 ns/op 17.21 MB/s 1142 B/op 2 allocs/op
|
||||
BenchmarkCacheGet-4 5000 3293849 ns/op 19.90 MB/s 1140 B/op 2 allocs/op
|
||||
BenchmarkCacheSetGet-4 2000 8456061 ns/op 15.50 MB/s 2857 B/op 5 allocs/op
|
||||
BenchmarkStdMapSet-4 2000 10559382 ns/op 6.21 MB/s 268413 B/op 65537 allocs/op
|
||||
BenchmarkStdMapGet-4 5000 2687404 ns/op 24.39 MB/s 2558 B/op 13 allocs/op
|
||||
BenchmarkStdMapSetGet-4 100 154641257 ns/op 0.85 MB/s 387405 B/op 65558 allocs/op
|
||||
BenchmarkSyncMapSet-4 500 24703219 ns/op 2.65 MB/s 3426543 B/op 262411 allocs/op
|
||||
BenchmarkSyncMapGet-4 5000 2265892 ns/op 28.92 MB/s 2545 B/op 79 allocs/op
|
||||
BenchmarkSyncMapSetGet-4 1000 14595535 ns/op 8.98 MB/s 3417190 B/op 262277 allocs/op
|
||||
```
|
||||
|
||||
`MB/s` column here actually means `millions of operations per second`.
|
||||
As you can see, `fastcache` is faster than `BigCache` in all cases.
|
||||
`fastcache` is faster than the standard Go map and `sync.Map` on workloads
|
||||
with inserts.
|
||||
|
||||
|
||||
### Limitations
|
||||
|
||||
* Keys and values must be byte slices. Other types must be marshaled before
|
||||
storing them in the cache.
|
||||
* Big entries with sizes exceeding 64KB must be stored via [distinct API](http://godoc.org/github.com/VictoriaMetrics/fastcache#Cache.SetBig).
|
||||
* There is no cache expiration. Entries are evicted from the cache only
|
||||
on cache size overflow. Entry deadline may be stored inside the value in order
|
||||
to implement cache expiration.
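A minimal sketch of the big-entry API mentioned in the limitations above, using the `SetBig`/`GetBig` signatures defined in bigcache.go later in this diff; the 1MB payload and cache size are just illustrative values:

```go
package main

import (
	"bytes"
	"log"

	"github.com/VictoriaMetrics/fastcache"
)

func main() {
	c := fastcache.New(128 * 1024 * 1024) // 128MB cache

	// Values larger than 64KB go through SetBig/GetBig.
	key := []byte("blob")
	value := bytes.Repeat([]byte("x"), 1<<20) // 1MB payload
	c.SetBig(key, value)

	// GetBig appends the stored value to dst (nil here) and returns the result.
	got := c.GetBig(nil, key)
	if !bytes.Equal(got, value) {
		log.Fatal("value missing or evicted")
	}
}
```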
|
||||
|
||||
|
||||
### Architecture details
|
||||
|
||||
The cache uses ideas from [BigCache](https://github.com/allegro/bigcache):
|
||||
|
||||
* The cache consists of many buckets, each with its own lock.
|
||||
This helps scale performance on multi-core CPUs, since multiple
|
||||
CPUs may concurrently access distinct buckets.
|
||||
* Each bucket consists of a `hash(key) -> (key, value) position` map
|
||||
and 64KB-sized byte slices (chunks) holding encoded `(key, value)` entries.
|
||||
Each bucket contains only `O(chunksCount)` pointers. For instance, 64GB cache
|
||||
would contain ~1M pointers, while similarly-sized `map[string][]byte`
|
||||
would contain ~1B pointers for short keys and values. This would lead to
|
||||
[huge GC overhead](https://syslog.ravelin.com/further-dangers-of-large-heaps-in-go-7a267b57d487).
|
||||
|
||||
64KB-sized chunks reduce memory fragmentation and total memory usage compared
with a single big chunk per bucket.
Chunks are allocated off-heap if possible. This reduces total memory usage because
GC collects unused memory more frequently without the need for `GOGC` tweaking.
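A generic illustration of the per-bucket-lock idea described above. This is not fastcache's actual implementation (which stores entries in 64KB chunks rather than a Go map); it is just a minimal sharded map showing why independent bucket locks scale across cores:

```go
package shardedcache

import (
	"hash/fnv"
	"sync"
)

const bucketCount = 256

// bucket holds its own lock, so concurrent access to different buckets
// never contends on a single global mutex.
type bucket struct {
	mu   sync.Mutex
	data map[string][]byte
}

type Cache struct {
	buckets [bucketCount]bucket
}

func New() *Cache {
	c := &Cache{}
	for i := range c.buckets {
		c.buckets[i].data = make(map[string][]byte)
	}
	return c
}

// bucketFor picks a bucket from the key hash, spreading keys across locks.
func (c *Cache) bucketFor(key string) *bucket {
	h := fnv.New32a()
	h.Write([]byte(key))
	return &c.buckets[h.Sum32()%bucketCount]
}

func (c *Cache) Set(key string, value []byte) {
	b := c.bucketFor(key)
	b.mu.Lock()
	b.data[key] = value
	b.mu.Unlock()
}

func (c *Cache) Get(key string) ([]byte, bool) {
	b := c.bucketFor(key)
	b.mu.Lock()
	v, ok := b.data[key]
	b.mu.Unlock()
	return v, ok
}
```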
|
||||
|
||||
|
||||
### Users
|
||||
|
||||
* `Fastcache` has been extracted from [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) sources.
|
||||
See [this article](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac)
|
||||
for more info about `VictoriaMetrics`.
|
||||
|
||||
|
||||
### FAQ
|
||||
|
||||
#### What is the difference between `fastcache` and other similar caches like [BigCache](https://github.com/allegro/bigcache) or [FreeCache](https://github.com/coocood/freecache)?
|
||||
|
||||
* `Fastcache` is faster. See benchmark results above.
|
||||
* `Fastcache` uses less memory due to lower heap fragmentation. This allows
|
||||
saving many GBs of memory on multi-GB caches.
|
||||
* `Fastcache` API [is simpler](http://godoc.org/github.com/VictoriaMetrics/fastcache).
|
||||
The API is designed to be used in zero-allocation mode.
|
||||
|
||||
|
||||
#### Why doesn't `fastcache` support cache expiration?
|
||||
|
||||
Because we don't need cache expiration in [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
Cached entries inside `VictoriaMetrics` never expire. They are automatically evicted on cache size overflow.
|
||||
|
||||
It is easy to implement cache expiration on top of `fastcache` by caching values
|
||||
with marshaled deadlines and verifying deadlines after reading these values
|
||||
from the cache.
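A minimal sketch of that approach, assuming only the `New`/`Set`/`Get` API that appears elsewhere in this diff; the 8-byte big-endian deadline prefix is an illustrative encoding choice, not part of fastcache:

```go
package main

import (
	"encoding/binary"
	"time"

	"github.com/VictoriaMetrics/fastcache"
)

// setWithTTL stores v under k with an expiration deadline prepended to the value.
func setWithTTL(c *fastcache.Cache, k, v []byte, ttl time.Duration) {
	buf := make([]byte, 8+len(v))
	binary.BigEndian.PutUint64(buf, uint64(time.Now().Add(ttl).UnixNano()))
	copy(buf[8:], v)
	c.Set(k, buf)
}

// getWithTTL returns the value for k, or nil if it is missing or expired.
func getWithTTL(c *fastcache.Cache, k []byte) []byte {
	buf := c.Get(nil, k)
	if len(buf) < 8 {
		return nil
	}
	deadline := int64(binary.BigEndian.Uint64(buf))
	if time.Now().UnixNano() > deadline {
		return nil
	}
	return buf[8:]
}

func main() {
	c := fastcache.New(32 * 1024 * 1024) // 32MB cache
	setWithTTL(c, []byte("key"), []byte("value"), time.Minute)
	_ = getWithTTL(c, []byte("key"))
}
```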
|
||||
|
||||
|
||||
#### Why doesn't `fastcache` support advanced features such as [thundering herd protection](https://en.wikipedia.org/wiki/Thundering_herd_problem) or callbacks on entry eviction?
|
||||
|
||||
Because these features would complicate the code and would make it slower.
|
||||
`Fastcache` source code is simple - just copy-paste it and implement the feature you want
|
||||
on top of it.
|
160
vendor/github.com/VictoriaMetrics/fastcache/bigcache.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
package fastcache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
xxhash "github.com/cespare/xxhash/v2"
|
||||
)
|
||||
|
||||
// maxSubvalueLen is the maximum size of subvalue chunk.
|
||||
//
|
||||
// - 16 bytes are for subkey encoding
|
||||
// - 4 bytes are for len(key)+len(value) encoding inside fastcache
|
||||
// - 1 byte is implementation detail of fastcache
|
||||
const maxSubvalueLen = chunkSize - 16 - 4 - 1
|
||||
|
||||
// maxKeyLen is the maximum size of key.
|
||||
//
|
||||
// - 16 bytes are for (hash + valueLen)
|
||||
// - 4 bytes are for len(key)+len(subkey)
|
||||
// - 1 byte is implementation detail of fastcache
|
||||
const maxKeyLen = chunkSize - 16 - 4 - 1
|
||||
|
||||
// SetBig sets (k, v) to c where len(v) may exceed 64KB.
|
||||
//
|
||||
// GetBig must be used for reading stored values.
|
||||
//
|
||||
// The stored entry may be evicted at any time either due to cache
|
||||
// overflow or due to unlikely hash collision.
|
||||
// Pass higher maxBytes value to New if the added items disappear
|
||||
// frequently.
|
||||
//
|
||||
// It is safe to store entries smaller than 64KB with SetBig.
|
||||
//
|
||||
// k and v contents may be modified after returning from SetBig.
|
||||
func (c *Cache) SetBig(k, v []byte) {
|
||||
atomic.AddUint64(&c.bigStats.SetBigCalls, 1)
|
||||
if len(k) > maxKeyLen {
|
||||
atomic.AddUint64(&c.bigStats.TooBigKeyErrors, 1)
|
||||
return
|
||||
}
|
||||
valueLen := len(v)
|
||||
valueHash := xxhash.Sum64(v)
|
||||
|
||||
// Split v into chunks with up to 64Kb each.
|
||||
subkey := getSubkeyBuf()
|
||||
var i uint64
|
||||
for len(v) > 0 {
|
||||
subkey.B = marshalUint64(subkey.B[:0], valueHash)
|
||||
subkey.B = marshalUint64(subkey.B, uint64(i))
|
||||
i++
|
||||
subvalueLen := maxSubvalueLen
|
||||
if len(v) < subvalueLen {
|
||||
subvalueLen = len(v)
|
||||
}
|
||||
subvalue := v[:subvalueLen]
|
||||
v = v[subvalueLen:]
|
||||
c.Set(subkey.B, subvalue)
|
||||
}
|
||||
|
||||
// Write metavalue, which consists of valueHash and valueLen.
|
||||
subkey.B = marshalUint64(subkey.B[:0], valueHash)
|
||||
subkey.B = marshalUint64(subkey.B, uint64(valueLen))
|
||||
c.Set(k, subkey.B)
|
||||
putSubkeyBuf(subkey)
|
||||
}
|
||||
|
||||
// GetBig searches for the value for the given k, appends it to dst
|
||||
// and returns the result.
|
||||
//
|
||||
// GetBig returns only values stored via SetBig. It doesn't work
|
||||
// with values stored via other methods.
|
||||
//
|
||||
// k contents may be modified after returning from GetBig.
|
||||
func (c *Cache) GetBig(dst, k []byte) (r []byte) {
|
||||
atomic.AddUint64(&c.bigStats.GetBigCalls, 1)
|
||||
subkey := getSubkeyBuf()
|
||||
dstWasNil := dst == nil
|
||||
defer func() {
|
||||
putSubkeyBuf(subkey)
|
||||
if len(r) == 0 && dstWasNil {
|
||||
// Guarantee that if the caller provided nil and this is a cache miss that
|
||||
// the caller can accurately test for a cache miss with `if r == nil`.
|
||||
r = nil
|
||||
}
|
||||
}()
|
||||
|
||||
// Read and parse metavalue
|
||||
subkey.B = c.Get(subkey.B[:0], k)
|
||||
if len(subkey.B) == 0 {
|
||||
// Nothing found.
|
||||
return dst
|
||||
}
|
||||
if len(subkey.B) != 16 {
|
||||
atomic.AddUint64(&c.bigStats.InvalidMetavalueErrors, 1)
|
||||
return dst
|
||||
}
|
||||
valueHash := unmarshalUint64(subkey.B)
|
||||
valueLen := unmarshalUint64(subkey.B[8:])
|
||||
|
||||
// Collect result from chunks.
|
||||
dstLen := len(dst)
|
||||
if n := dstLen + int(valueLen) - cap(dst); n > 0 {
|
||||
dst = append(dst[:cap(dst)], make([]byte, n)...)
|
||||
}
|
||||
dst = dst[:dstLen]
|
||||
var i uint64
|
||||
for uint64(len(dst)-dstLen) < valueLen {
|
||||
subkey.B = marshalUint64(subkey.B[:0], valueHash)
|
||||
subkey.B = marshalUint64(subkey.B, uint64(i))
|
||||
i++
|
||||
dstNew := c.Get(dst, subkey.B)
|
||||
if len(dstNew) == len(dst) {
|
||||
// Cannot find subvalue
|
||||
return dst[:dstLen]
|
||||
}
|
||||
dst = dstNew
|
||||
}
|
||||
|
||||
// Verify the obtained value.
|
||||
v := dst[dstLen:]
|
||||
if uint64(len(v)) != valueLen {
|
||||
atomic.AddUint64(&c.bigStats.InvalidValueLenErrors, 1)
|
||||
return dst[:dstLen]
|
||||
}
|
||||
h := xxhash.Sum64(v)
|
||||
if h != valueHash {
|
||||
atomic.AddUint64(&c.bigStats.InvalidValueHashErrors, 1)
|
||||
return dst[:dstLen]
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func getSubkeyBuf() *bytesBuf {
|
||||
v := subkeyPool.Get()
|
||||
if v == nil {
|
||||
return &bytesBuf{}
|
||||
}
|
||||
return v.(*bytesBuf)
|
||||
}
|
||||
|
||||
func putSubkeyBuf(bb *bytesBuf) {
|
||||
bb.B = bb.B[:0]
|
||||
subkeyPool.Put(bb)
|
||||
}
|
||||
|
||||
var subkeyPool sync.Pool
|
||||
|
||||
type bytesBuf struct {
|
||||
B []byte
|
||||
}
|
||||
|
||||
func marshalUint64(dst []byte, u uint64) []byte {
|
||||
return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), byte(u>>24), byte(u>>16), byte(u>>8), byte(u))
|
||||
}
|
||||
|
||||
func unmarshalUint64(src []byte) uint64 {
|
||||
_ = src[7]
|
||||
return uint64(src[0])<<56 | uint64(src[1])<<48 | uint64(src[2])<<40 | uint64(src[3])<<32 | uint64(src[4])<<24 | uint64(src[5])<<16 | uint64(src[6])<<8 | uint64(src[7])
|
||||
}
|
415
vendor/github.com/VictoriaMetrics/fastcache/fastcache.go
generated
vendored
Normal file
@ -0,0 +1,415 @@
|
||||
// Package fastcache implements fast in-memory cache.
|
||||
//
|
||||
// The package has been extracted from https://victoriametrics.com/
|
||||
package fastcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
xxhash "github.com/cespare/xxhash/v2"
|
||||
)
|
||||
|
||||
const bucketsCount = 512
|
||||
|
||||
const chunkSize = 64 * 1024
|
||||
|
||||
const bucketSizeBits = 40
|
||||
|
||||
const genSizeBits = 64 - bucketSizeBits
|
||||
|
||||
const maxGen = 1<<genSizeBits - 1
|
||||
|
||||
const maxBucketSize uint64 = 1 << bucketSizeBits
|
||||
|
||||
// Stats represents cache stats.
|
||||
//
|
||||
// Use Cache.UpdateStats for obtaining fresh stats from the cache.
|
||||
type Stats struct {
|
||||
// GetCalls is the number of Get calls.
|
||||
GetCalls uint64
|
||||
|
||||
// SetCalls is the number of Set calls.
|
||||
SetCalls uint64
|
||||
|
||||
// Misses is the number of cache misses.
|
||||
Misses uint64
|
||||
|
||||
// Collisions is the number of cache collisions.
|
||||
//
|
||||
// Usually the number of collisions must be close to zero.
|
||||
// High number of collisions suggest something wrong with cache.
|
||||
Collisions uint64
|
||||
|
||||
// Corruptions is the number of detected corruptions of the cache.
|
||||
//
|
||||
// Corruptions may occur when corrupted cache is loaded from file.
|
||||
Corruptions uint64
|
||||
|
||||
// EntriesCount is the current number of entries in the cache.
|
||||
EntriesCount uint64
|
||||
|
||||
// BytesSize is the current size of the cache in bytes.
|
||||
BytesSize uint64
|
||||
|
||||
// BigStats contains stats for GetBig/SetBig methods.
|
||||
BigStats
|
||||
}
|
||||
|
||||
// Reset resets s, so it may be re-used again in Cache.UpdateStats.
|
||||
func (s *Stats) Reset() {
|
||||
*s = Stats{}
|
||||
}
|
||||
|
||||
// BigStats contains stats for GetBig/SetBig methods.
|
||||
type BigStats struct {
|
||||
// GetBigCalls is the number of GetBig calls.
|
||||
GetBigCalls uint64
|
||||
|
||||
// SetBigCalls is the number of SetBig calls.
|
||||
SetBigCalls uint64
|
||||
|
||||
// TooBigKeyErrors is the number of calls to SetBig with too big key.
|
||||
TooBigKeyErrors uint64
|
||||
|
||||
// InvalidMetavalueErrors is the number of calls to GetBig resulting
|
||||
// to invalid metavalue.
|
||||
InvalidMetavalueErrors uint64
|
||||
|
||||
// InvalidValueLenErrors is the number of calls to GetBig resulting
|
||||
// to a chunk with invalid length.
|
||||
InvalidValueLenErrors uint64
|
||||
|
||||
// InvalidValueHashErrors is the number of calls to GetBig resulting
|
||||
// to a chunk with invalid hash value.
|
||||
InvalidValueHashErrors uint64
|
||||
}
|
||||
|
||||
func (bs *BigStats) reset() {
|
||||
atomic.StoreUint64(&bs.GetBigCalls, 0)
|
||||
atomic.StoreUint64(&bs.SetBigCalls, 0)
|
||||
atomic.StoreUint64(&bs.TooBigKeyErrors, 0)
|
||||
atomic.StoreUint64(&bs.InvalidMetavalueErrors, 0)
|
||||
atomic.StoreUint64(&bs.InvalidValueLenErrors, 0)
|
||||
atomic.StoreUint64(&bs.InvalidValueHashErrors, 0)
|
||||
}
|
||||
|
||||
// Cache is a fast thread-safe in-memory cache optimized for a big number
// of entries.
|
||||
//
|
||||
// It has a much lower impact on GC compared to a simple `map[string][]byte`.
|
||||
//
|
||||
// Use New or LoadFromFile* for creating new cache instance.
|
||||
// Concurrent goroutines may call any Cache methods on the same cache instance.
|
||||
//
|
||||
// Call Reset when the cache is no longer needed. This reclaims the allocated
|
||||
// memory.
|
||||
type Cache struct {
|
||||
buckets [bucketsCount]bucket
|
||||
|
||||
bigStats BigStats
|
||||
}
|
||||
|
||||
// New returns new cache with the given maxBytes capacity in bytes.
|
||||
//
|
||||
// maxBytes must be smaller than the available RAM size for the app,
|
||||
// since the cache holds data in memory.
|
||||
//
|
||||
// If maxBytes is less than 32MB, then the minimum cache capacity is 32MB.
|
||||
func New(maxBytes int) *Cache {
|
||||
if maxBytes <= 0 {
|
||||
panic(fmt.Errorf("maxBytes must be greater than 0; got %d", maxBytes))
|
||||
}
|
||||
var c Cache
|
||||
maxBucketBytes := uint64((maxBytes + bucketsCount - 1) / bucketsCount)
|
||||
for i := range c.buckets[:] {
|
||||
c.buckets[i].Init(maxBucketBytes)
|
||||
}
|
||||
return &c
|
||||
}
|
||||
|
||||
// Set stores (k, v) in the cache.
|
||||
//
|
||||
// Get must be used for reading the stored entry.
|
||||
//
|
||||
// The stored entry may be evicted at any time either due to cache
|
||||
// overflow or due to unlikely hash collision.
|
||||
// Pass higher maxBytes value to New if the added items disappear
|
||||
// frequently.
|
||||
//
|
||||
// (k, v) entries with summary size exceeding 64KB aren't stored in the cache.
|
||||
// SetBig can be used for storing entries exceeding 64KB.
|
||||
//
|
||||
// k and v contents may be modified after returning from Set.
|
||||
func (c *Cache) Set(k, v []byte) {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
c.buckets[idx].Set(k, v, h)
|
||||
}
|
||||
|
||||
// Get appends value by the key k to dst and returns the result.
|
||||
//
|
||||
// Get allocates new byte slice for the returned value if dst is nil.
|
||||
//
|
||||
// Get returns only values stored in c via Set.
|
||||
//
|
||||
// k contents may be modified after returning from Get.
|
||||
func (c *Cache) Get(dst, k []byte) []byte {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
dst, _ = c.buckets[idx].Get(dst, k, h, true)
|
||||
return dst
|
||||
}
|
||||
|
||||
// HasGet works identically to Get, but also returns whether the given key
|
||||
// exists in the cache. This method makes it possible to differentiate between a
|
||||
// stored nil/empty value versus a non-existing value.
|
||||
func (c *Cache) HasGet(dst, k []byte) ([]byte, bool) {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
return c.buckets[idx].Get(dst, k, h, true)
|
||||
}
|
||||
|
||||
// Has returns true if entry for the given key k exists in the cache.
|
||||
func (c *Cache) Has(k []byte) bool {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
_, ok := c.buckets[idx].Get(nil, k, h, false)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Del deletes value for the given k from the cache.
|
||||
//
|
||||
// k contents may be modified after returning from Del.
|
||||
func (c *Cache) Del(k []byte) {
|
||||
h := xxhash.Sum64(k)
|
||||
idx := h % bucketsCount
|
||||
c.buckets[idx].Del(h)
|
||||
}
|
||||
|
||||
// Reset removes all the items from the cache.
|
||||
func (c *Cache) Reset() {
|
||||
for i := range c.buckets[:] {
|
||||
c.buckets[i].Reset()
|
||||
}
|
||||
c.bigStats.reset()
|
||||
}
|
||||
|
||||
// UpdateStats adds cache stats to s.
|
||||
//
|
||||
// Call s.Reset before calling UpdateStats if s is re-used.
|
||||
func (c *Cache) UpdateStats(s *Stats) {
|
||||
for i := range c.buckets[:] {
|
||||
c.buckets[i].UpdateStats(s)
|
||||
}
|
||||
s.GetBigCalls += atomic.LoadUint64(&c.bigStats.GetBigCalls)
|
||||
s.SetBigCalls += atomic.LoadUint64(&c.bigStats.SetBigCalls)
|
||||
s.TooBigKeyErrors += atomic.LoadUint64(&c.bigStats.TooBigKeyErrors)
|
||||
s.InvalidMetavalueErrors += atomic.LoadUint64(&c.bigStats.InvalidMetavalueErrors)
|
||||
s.InvalidValueLenErrors += atomic.LoadUint64(&c.bigStats.InvalidValueLenErrors)
|
||||
s.InvalidValueHashErrors += atomic.LoadUint64(&c.bigStats.InvalidValueHashErrors)
|
||||
}
|
||||
|
||||
type bucket struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
// chunks is a ring buffer with encoded (k, v) pairs.
|
||||
// It consists of 64KB chunks.
|
||||
chunks [][]byte
|
||||
|
||||
// m maps hash(k) to idx of (k, v) pair in chunks.
|
||||
m map[uint64]uint64
|
||||
|
||||
// idx points to chunks for writing the next (k, v) pair.
|
||||
idx uint64
|
||||
|
||||
// gen is the generation of chunks.
|
||||
gen uint64
|
||||
|
||||
getCalls uint64
|
||||
setCalls uint64
|
||||
misses uint64
|
||||
collisions uint64
|
||||
corruptions uint64
|
||||
}
|
||||
|
||||
func (b *bucket) Init(maxBytes uint64) {
|
||||
if maxBytes == 0 {
|
||||
panic(fmt.Errorf("maxBytes cannot be zero"))
|
||||
}
|
||||
if maxBytes >= maxBucketSize {
|
||||
panic(fmt.Errorf("too big maxBytes=%d; should be smaller than %d", maxBytes, maxBucketSize))
|
||||
}
|
||||
maxChunks := (maxBytes + chunkSize - 1) / chunkSize
|
||||
b.chunks = make([][]byte, maxChunks)
|
||||
b.m = make(map[uint64]uint64)
|
||||
b.Reset()
|
||||
}
|
||||
|
||||
func (b *bucket) Reset() {
|
||||
b.mu.Lock()
|
||||
chunks := b.chunks
|
||||
for i := range chunks {
|
||||
putChunk(chunks[i])
|
||||
chunks[i] = nil
|
||||
}
|
||||
bm := b.m
|
||||
for k := range bm {
|
||||
delete(bm, k)
|
||||
}
|
||||
b.idx = 0
|
||||
b.gen = 1
|
||||
atomic.StoreUint64(&b.getCalls, 0)
|
||||
atomic.StoreUint64(&b.setCalls, 0)
|
||||
atomic.StoreUint64(&b.misses, 0)
|
||||
atomic.StoreUint64(&b.collisions, 0)
|
||||
atomic.StoreUint64(&b.corruptions, 0)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
func (b *bucket) Clean() {
|
||||
b.mu.Lock()
|
||||
bGen := b.gen & ((1 << genSizeBits) - 1)
|
||||
bIdx := b.idx
|
||||
bm := b.m
|
||||
for k, v := range bm {
|
||||
gen := v >> bucketSizeBits
|
||||
idx := v & ((1 << bucketSizeBits) - 1)
|
||||
if gen == bGen && idx < bIdx || gen+1 == bGen && idx >= bIdx || gen == maxGen && bGen == 1 && idx >= bIdx {
|
||||
continue
|
||||
}
|
||||
delete(bm, k)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
func (b *bucket) UpdateStats(s *Stats) {
|
||||
s.GetCalls += atomic.LoadUint64(&b.getCalls)
|
||||
s.SetCalls += atomic.LoadUint64(&b.setCalls)
|
||||
s.Misses += atomic.LoadUint64(&b.misses)
|
||||
s.Collisions += atomic.LoadUint64(&b.collisions)
|
||||
s.Corruptions += atomic.LoadUint64(&b.corruptions)
|
||||
|
||||
b.mu.RLock()
|
||||
s.EntriesCount += uint64(len(b.m))
|
||||
for _, chunk := range b.chunks {
|
||||
s.BytesSize += uint64(cap(chunk))
|
||||
}
|
||||
b.mu.RUnlock()
|
||||
}
|
||||
|
||||
func (b *bucket) Set(k, v []byte, h uint64) {
|
||||
setCalls := atomic.AddUint64(&b.setCalls, 1)
|
||||
if setCalls%(1<<14) == 0 {
|
||||
b.Clean()
|
||||
}
|
||||
|
||||
if len(k) >= (1<<16) || len(v) >= (1<<16) {
|
||||
// Too big key or value - its length cannot be encoded
|
||||
// with 2 bytes (see below). Skip the entry.
|
||||
return
|
||||
}
|
||||
var kvLenBuf [4]byte
|
||||
kvLenBuf[0] = byte(uint16(len(k)) >> 8)
|
||||
kvLenBuf[1] = byte(len(k))
|
||||
kvLenBuf[2] = byte(uint16(len(v)) >> 8)
|
||||
kvLenBuf[3] = byte(len(v))
|
||||
kvLen := uint64(len(kvLenBuf) + len(k) + len(v))
|
||||
if kvLen >= chunkSize {
|
||||
// Do not store too big keys and values, since they do not
|
||||
// fit a chunk.
|
||||
return
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
idx := b.idx
|
||||
idxNew := idx + kvLen
|
||||
chunkIdx := idx / chunkSize
|
||||
chunkIdxNew := idxNew / chunkSize
|
||||
if chunkIdxNew > chunkIdx {
|
||||
if chunkIdxNew >= uint64(len(b.chunks)) {
|
||||
idx = 0
|
||||
idxNew = kvLen
|
||||
chunkIdx = 0
|
||||
b.gen++
|
||||
if b.gen&((1<<genSizeBits)-1) == 0 {
|
||||
b.gen++
|
||||
}
|
||||
} else {
|
||||
idx = chunkIdxNew * chunkSize
|
||||
idxNew = idx + kvLen
|
||||
chunkIdx = chunkIdxNew
|
||||
}
|
||||
b.chunks[chunkIdx] = b.chunks[chunkIdx][:0]
|
||||
}
|
||||
chunk := b.chunks[chunkIdx]
|
||||
if chunk == nil {
|
||||
chunk = getChunk()
|
||||
chunk = chunk[:0]
|
||||
}
|
||||
chunk = append(chunk, kvLenBuf[:]...)
|
||||
chunk = append(chunk, k...)
|
||||
chunk = append(chunk, v...)
|
||||
b.chunks[chunkIdx] = chunk
|
||||
b.m[h] = idx | (b.gen << bucketSizeBits)
|
||||
b.idx = idxNew
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
func (b *bucket) Get(dst, k []byte, h uint64, returnDst bool) ([]byte, bool) {
|
||||
atomic.AddUint64(&b.getCalls, 1)
|
||||
found := false
|
||||
b.mu.RLock()
|
||||
v := b.m[h]
|
||||
bGen := b.gen & ((1 << genSizeBits) - 1)
|
||||
if v > 0 {
|
||||
gen := v >> bucketSizeBits
|
||||
idx := v & ((1 << bucketSizeBits) - 1)
|
||||
if gen == bGen && idx < b.idx || gen+1 == bGen && idx >= b.idx || gen == maxGen && bGen == 1 && idx >= b.idx {
|
||||
chunkIdx := idx / chunkSize
|
||||
if chunkIdx >= uint64(len(b.chunks)) {
|
||||
// Corrupted data during the load from file. Just skip it.
|
||||
atomic.AddUint64(&b.corruptions, 1)
|
||||
goto end
|
||||
}
|
||||
chunk := b.chunks[chunkIdx]
|
||||
idx %= chunkSize
|
||||
if idx+4 >= chunkSize {
|
||||
// Corrupted data during the load from file. Just skip it.
|
||||
atomic.AddUint64(&b.corruptions, 1)
|
||||
goto end
|
||||
}
|
||||
kvLenBuf := chunk[idx : idx+4]
|
||||
keyLen := (uint64(kvLenBuf[0]) << 8) | uint64(kvLenBuf[1])
|
||||
valLen := (uint64(kvLenBuf[2]) << 8) | uint64(kvLenBuf[3])
|
||||
idx += 4
|
||||
if idx+keyLen+valLen >= chunkSize {
|
||||
// Corrupted data during the load from file. Just skip it.
|
||||
atomic.AddUint64(&b.corruptions, 1)
|
||||
goto end
|
||||
}
|
||||
if string(k) == string(chunk[idx:idx+keyLen]) {
|
||||
idx += keyLen
|
||||
if returnDst {
|
||||
dst = append(dst, chunk[idx:idx+valLen]...)
|
||||
}
|
||||
found = true
|
||||
} else {
|
||||
atomic.AddUint64(&b.collisions, 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
end:
|
||||
b.mu.RUnlock()
|
||||
if !found {
|
||||
atomic.AddUint64(&b.misses, 1)
|
||||
}
|
||||
return dst, found
|
||||
}
|
||||
|
||||
func (b *bucket) Del(h uint64) {
|
||||
b.mu.Lock()
|
||||
delete(b.m, h)
|
||||
b.mu.Unlock()
|
||||
}
|
419
vendor/github.com/VictoriaMetrics/fastcache/file.go
generated
vendored
Normal file
@ -0,0 +1,419 @@
|
||||
package fastcache
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
)
|
||||
|
||||
// SaveToFile atomically saves cache data to the given filePath using a single
|
||||
// CPU core.
|
||||
//
|
||||
// SaveToFile may be called concurrently with other operations on the cache.
|
||||
//
|
||||
// The saved data may be loaded with LoadFromFile*.
|
||||
//
|
||||
// See also SaveToFileConcurrent for faster saving to file.
|
||||
func (c *Cache) SaveToFile(filePath string) error {
|
||||
return c.SaveToFileConcurrent(filePath, 1)
|
||||
}
|
||||
|
||||
// SaveToFileConcurrent saves cache data to the given filePath using concurrency
|
||||
// CPU cores.
|
||||
//
|
||||
// SaveToFileConcurrent may be called concurrently with other operations
|
||||
// on the cache.
|
||||
//
|
||||
// The saved data may be loaded with LoadFromFile*.
|
||||
//
|
||||
// See also SaveToFile.
|
||||
func (c *Cache) SaveToFileConcurrent(filePath string, concurrency int) error {
|
||||
// Create dir if it doesn't exist.
|
||||
dir := filepath.Dir(filePath)
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return fmt.Errorf("cannot stat %q: %s", dir, err)
|
||||
}
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("cannot create dir %q: %s", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Save cache data into a temporary directory.
|
||||
tmpDir, err := ioutil.TempDir(dir, "fastcache.tmp.")
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot create temporary dir inside %q: %s", dir, err)
|
||||
}
|
||||
defer func() {
|
||||
if tmpDir != "" {
|
||||
_ = os.RemoveAll(tmpDir)
|
||||
}
|
||||
}()
|
||||
gomaxprocs := runtime.GOMAXPROCS(-1)
|
||||
if concurrency <= 0 || concurrency > gomaxprocs {
|
||||
concurrency = gomaxprocs
|
||||
}
|
||||
if err := c.save(tmpDir, concurrency); err != nil {
|
||||
return fmt.Errorf("cannot save cache data to temporary dir %q: %s", tmpDir, err)
|
||||
}
|
||||
|
||||
// Remove old filePath contents, since os.Rename may return
|
||||
// error if filePath dir exists.
|
||||
if err := os.RemoveAll(filePath); err != nil {
|
||||
return fmt.Errorf("cannot remove old contents at %q: %s", filePath, err)
|
||||
}
|
||||
if err := os.Rename(tmpDir, filePath); err != nil {
|
||||
return fmt.Errorf("cannot move temporary dir %q to %q: %s", tmpDir, filePath, err)
|
||||
}
|
||||
tmpDir = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadFromFile loads cache data from the given filePath.
|
||||
//
|
||||
// See SaveToFile* for saving cache data to file.
|
||||
func LoadFromFile(filePath string) (*Cache, error) {
|
||||
return load(filePath, 0)
|
||||
}
|
||||
|
||||
// LoadFromFileOrNew tries loading cache data from the given filePath.
|
||||
//
|
||||
// The function falls back to creating new cache with the given maxBytes
|
||||
// capacity if error occurs during loading the cache from file.
|
||||
func LoadFromFileOrNew(filePath string, maxBytes int) *Cache {
|
||||
c, err := load(filePath, maxBytes)
|
||||
if err == nil {
|
||||
return c
|
||||
}
|
||||
return New(maxBytes)
|
||||
}
|
||||
|
||||
func (c *Cache) save(dir string, workersCount int) error {
|
||||
if err := saveMetadata(c, dir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save buckets by workersCount concurrent workers.
|
||||
workCh := make(chan int, workersCount)
|
||||
results := make(chan error)
|
||||
for i := 0; i < workersCount; i++ {
|
||||
go func(workerNum int) {
|
||||
results <- saveBuckets(c.buckets[:], workCh, dir, workerNum)
|
||||
}(i)
|
||||
}
|
||||
// Feed workers with work
|
||||
for i := range c.buckets[:] {
|
||||
workCh <- i
|
||||
}
|
||||
close(workCh)
|
||||
|
||||
// Read results.
|
||||
var err error
|
||||
for i := 0; i < workersCount; i++ {
|
||||
result := <-results
|
||||
if result != nil && err == nil {
|
||||
err = result
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func load(filePath string, maxBytes int) (*Cache, error) {
|
||||
maxBucketChunks, err := loadMetadata(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if maxBytes > 0 {
|
||||
maxBucketBytes := uint64((maxBytes + bucketsCount - 1) / bucketsCount)
|
||||
expectedBucketChunks := (maxBucketBytes + chunkSize - 1) / chunkSize
|
||||
if maxBucketChunks != expectedBucketChunks {
|
||||
return nil, fmt.Errorf("cache file %s contains maxBytes=%d; want %d", filePath, maxBytes, expectedBucketChunks*chunkSize*bucketsCount)
|
||||
}
|
||||
}
|
||||
|
||||
// Read bucket files from filePath dir.
|
||||
d, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot open %q: %s", filePath, err)
|
||||
}
|
||||
defer func() {
|
||||
_ = d.Close()
|
||||
}()
|
||||
fis, err := d.Readdir(-1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read files from %q: %s", filePath, err)
|
||||
}
|
||||
results := make(chan error)
|
||||
workersCount := 0
|
||||
var c Cache
|
||||
for _, fi := range fis {
|
||||
fn := fi.Name()
|
||||
if fi.IsDir() || !dataFileRegexp.MatchString(fn) {
|
||||
continue
|
||||
}
|
||||
workersCount++
|
||||
go func(dataPath string) {
|
||||
results <- loadBuckets(c.buckets[:], dataPath, maxBucketChunks)
|
||||
}(filePath + "/" + fn)
|
||||
}
|
||||
err = nil
|
||||
for i := 0; i < workersCount; i++ {
|
||||
result := <-results
|
||||
if result != nil && err == nil {
|
||||
err = result
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Initialize buckets, which could be missing due to incomplete or corrupted files in the cache.
|
||||
// It is better initializing such buckets instead of returning error, since the rest of buckets
|
||||
// contain valid data.
|
||||
for i := range c.buckets[:] {
|
||||
b := &c.buckets[i]
|
||||
if len(b.chunks) == 0 {
|
||||
b.chunks = make([][]byte, maxBucketChunks)
|
||||
b.m = make(map[uint64]uint64)
|
||||
}
|
||||
}
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
func saveMetadata(c *Cache, dir string) error {
|
||||
metadataPath := dir + "/metadata.bin"
|
||||
metadataFile, err := os.Create(metadataPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot create %q: %s", metadataPath, err)
|
||||
}
|
||||
defer func() {
|
||||
_ = metadataFile.Close()
|
||||
}()
|
||||
maxBucketChunks := uint64(cap(c.buckets[0].chunks))
|
||||
if err := writeUint64(metadataFile, maxBucketChunks); err != nil {
|
||||
return fmt.Errorf("cannot write maxBucketChunks=%d to %q: %s", maxBucketChunks, metadataPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadMetadata(dir string) (uint64, error) {
|
||||
metadataPath := dir + "/metadata.bin"
|
||||
metadataFile, err := os.Open(metadataPath)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot open %q: %s", metadataPath, err)
|
||||
}
|
||||
defer func() {
|
||||
_ = metadataFile.Close()
|
||||
}()
|
||||
maxBucketChunks, err := readUint64(metadataFile)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot read maxBucketChunks from %q: %s", metadataPath, err)
|
||||
}
|
||||
if maxBucketChunks == 0 {
|
||||
return 0, fmt.Errorf("invalid maxBucketChunks=0 read from %q", metadataPath)
|
||||
}
|
||||
return maxBucketChunks, nil
|
||||
}
|
||||
|
||||
var dataFileRegexp = regexp.MustCompile(`^data\.\d+\.bin$`)
|
||||
|
||||
func saveBuckets(buckets []bucket, workCh <-chan int, dir string, workerNum int) error {
|
||||
dataPath := fmt.Sprintf("%s/data.%d.bin", dir, workerNum)
|
||||
dataFile, err := os.Create(dataPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot create %q: %s", dataPath, err)
|
||||
}
|
||||
defer func() {
|
||||
_ = dataFile.Close()
|
||||
}()
|
||||
zw := snappy.NewBufferedWriter(dataFile)
|
||||
for bucketNum := range workCh {
|
||||
if err := writeUint64(zw, uint64(bucketNum)); err != nil {
|
||||
return fmt.Errorf("cannot write bucketNum=%d to %q: %s", bucketNum, dataPath, err)
|
||||
}
|
||||
if err := buckets[bucketNum].Save(zw); err != nil {
|
||||
return fmt.Errorf("cannot save bucket[%d] to %q: %s", bucketNum, dataPath, err)
|
||||
}
|
||||
}
|
||||
if err := zw.Close(); err != nil {
|
||||
return fmt.Errorf("cannot close snappy.Writer for %q: %s", dataPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadBuckets(buckets []bucket, dataPath string, maxChunks uint64) error {
|
||||
dataFile, err := os.Open(dataPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open %q: %s", dataPath, err)
|
||||
}
|
||||
defer func() {
|
||||
_ = dataFile.Close()
|
||||
}()
|
||||
zr := snappy.NewReader(dataFile)
|
||||
for {
|
||||
bucketNum, err := readUint64(zr)
|
||||
if err == io.EOF {
|
||||
// Reached the end of file.
|
||||
return nil
|
||||
}
|
||||
if bucketNum >= uint64(len(buckets)) {
|
||||
return fmt.Errorf("unexpected bucketNum read from %q: %d; must be smaller than %d", dataPath, bucketNum, len(buckets))
|
||||
}
|
||||
if err := buckets[bucketNum].Load(zr, maxChunks); err != nil {
|
||||
return fmt.Errorf("cannot load bucket[%d] from %q: %s", bucketNum, dataPath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bucket) Save(w io.Writer) error {
|
||||
b.Clean()
|
||||
|
||||
b.mu.RLock()
|
||||
defer b.mu.RUnlock()
|
||||
|
||||
// Store b.idx, b.gen and b.m to w.
|
||||
|
||||
bIdx := b.idx
|
||||
bGen := b.gen
|
||||
chunksLen := 0
|
||||
for _, chunk := range b.chunks {
|
||||
if chunk == nil {
|
||||
break
|
||||
}
|
||||
chunksLen++
|
||||
}
|
||||
kvs := make([]byte, 0, 2*8*len(b.m))
|
||||
var u64Buf [8]byte
|
||||
for k, v := range b.m {
|
||||
binary.LittleEndian.PutUint64(u64Buf[:], k)
|
||||
kvs = append(kvs, u64Buf[:]...)
|
||||
binary.LittleEndian.PutUint64(u64Buf[:], v)
|
||||
kvs = append(kvs, u64Buf[:]...)
|
||||
}
|
||||
|
||||
if err := writeUint64(w, bIdx); err != nil {
|
||||
return fmt.Errorf("cannot write b.idx: %s", err)
|
||||
}
|
||||
if err := writeUint64(w, bGen); err != nil {
|
||||
return fmt.Errorf("cannot write b.gen: %s", err)
|
||||
}
|
||||
if err := writeUint64(w, uint64(len(kvs))/2/8); err != nil {
|
||||
return fmt.Errorf("cannot write len(b.m): %s", err)
|
||||
}
|
||||
if _, err := w.Write(kvs); err != nil {
|
||||
return fmt.Errorf("cannot write b.m: %s", err)
|
||||
}
|
||||
|
||||
// Store b.chunks to w.
|
||||
if err := writeUint64(w, uint64(chunksLen)); err != nil {
|
||||
return fmt.Errorf("cannot write len(b.chunks): %s", err)
|
||||
}
|
||||
for chunkIdx := 0; chunkIdx < chunksLen; chunkIdx++ {
|
||||
chunk := b.chunks[chunkIdx][:chunkSize]
|
||||
if _, err := w.Write(chunk); err != nil {
|
||||
return fmt.Errorf("cannot write b.chunks[%d]: %s", chunkIdx, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *bucket) Load(r io.Reader, maxChunks uint64) error {
|
||||
if maxChunks == 0 {
|
||||
return fmt.Errorf("the number of chunks per bucket cannot be zero")
|
||||
}
|
||||
bIdx, err := readUint64(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read b.idx: %s", err)
|
||||
}
|
||||
bGen, err := readUint64(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read b.gen: %s", err)
|
||||
}
|
||||
kvsLen, err := readUint64(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read len(b.m): %s", err)
|
||||
}
|
||||
kvsLen *= 2 * 8
|
||||
kvs := make([]byte, kvsLen)
|
||||
if _, err := io.ReadFull(r, kvs); err != nil {
|
||||
return fmt.Errorf("cannot read b.m: %s", err)
|
||||
}
|
||||
m := make(map[uint64]uint64, kvsLen/2/8)
|
||||
for len(kvs) > 0 {
|
||||
k := binary.LittleEndian.Uint64(kvs)
|
||||
kvs = kvs[8:]
|
||||
v := binary.LittleEndian.Uint64(kvs)
|
||||
kvs = kvs[8:]
|
||||
m[k] = v
|
||||
}
|
||||
|
||||
maxBytes := maxChunks * chunkSize
|
||||
if maxBytes >= maxBucketSize {
|
||||
return fmt.Errorf("too big maxBytes=%d; should be smaller than %d", maxBytes, maxBucketSize)
|
||||
}
|
||||
chunks := make([][]byte, maxChunks)
|
||||
chunksLen, err := readUint64(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read len(b.chunks): %s", err)
|
||||
}
|
||||
if chunksLen > uint64(maxChunks) {
|
||||
return fmt.Errorf("chunksLen=%d cannot exceed maxChunks=%d", chunksLen, maxChunks)
|
||||
}
|
||||
currChunkIdx := bIdx / chunkSize
|
||||
if currChunkIdx > 0 && currChunkIdx >= chunksLen {
|
||||
return fmt.Errorf("too big bIdx=%d; should be smaller than %d", bIdx, chunksLen*chunkSize)
|
||||
}
|
||||
for chunkIdx := uint64(0); chunkIdx < chunksLen; chunkIdx++ {
|
||||
chunk := getChunk()
|
||||
chunks[chunkIdx] = chunk
|
||||
if _, err := io.ReadFull(r, chunk); err != nil {
|
||||
// Free up allocated chunks before returning the error.
|
||||
for _, chunk := range chunks {
|
||||
if chunk != nil {
|
||||
putChunk(chunk)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("cannot read b.chunks[%d]: %s", chunkIdx, err)
|
||||
}
|
||||
}
|
||||
// Adjust len for the chunk pointed by currChunkIdx.
|
||||
if chunksLen > 0 {
|
||||
chunkLen := bIdx % chunkSize
|
||||
chunks[currChunkIdx] = chunks[currChunkIdx][:chunkLen]
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
for _, chunk := range b.chunks {
|
||||
putChunk(chunk)
|
||||
}
|
||||
b.chunks = chunks
|
||||
b.m = m
|
||||
b.idx = bIdx
|
||||
b.gen = bGen
|
||||
b.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUint64(w io.Writer, u uint64) error {
|
||||
var u64Buf [8]byte
|
||||
binary.LittleEndian.PutUint64(u64Buf[:], u)
|
||||
_, err := w.Write(u64Buf[:])
|
||||
return err
|
||||
}
|
||||
|
||||
func readUint64(r io.Reader) (uint64, error) {
|
||||
var u64Buf [8]byte
|
||||
if _, err := io.ReadFull(r, u64Buf[:]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
u := binary.LittleEndian.Uint64(u64Buf[:])
|
||||
return u, nil
|
||||
}
|
12
vendor/github.com/VictoriaMetrics/fastcache/go.mod
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
module github.com/VictoriaMetrics/fastcache
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156
|
||||
github.com/cespare/xxhash/v2 v2.1.1
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/golang/snappy v0.0.3
|
||||
github.com/stretchr/testify v1.3.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492
|
||||
)
|
16
vendor/github.com/VictoriaMetrics/fastcache/go.sum
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
11
vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
// +build appengine windows
|
||||
|
||||
package fastcache
|
||||
|
||||
func getChunk() []byte {
|
||||
return make([]byte, chunkSize)
|
||||
}
|
||||
|
||||
func putChunk(chunk []byte) {
|
||||
// No-op.
|
||||
}
|
53
vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// +build !appengine,!windows
|
||||
|
||||
package fastcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const chunksPerAlloc = 1024
|
||||
|
||||
var (
|
||||
freeChunks []*[chunkSize]byte
|
||||
freeChunksLock sync.Mutex
|
||||
)
|
||||
|
||||
func getChunk() []byte {
|
||||
freeChunksLock.Lock()
|
||||
if len(freeChunks) == 0 {
|
||||
// Allocate offheap memory, so GOGC won't take into account cache size.
|
||||
// This should reduce free memory waste.
|
||||
data, err := unix.Mmap(-1, 0, chunkSize*chunksPerAlloc, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANON|unix.MAP_PRIVATE)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot allocate %d bytes via mmap: %s", chunkSize*chunksPerAlloc, err))
|
||||
}
|
||||
for len(data) > 0 {
|
||||
p := (*[chunkSize]byte)(unsafe.Pointer(&data[0]))
|
||||
freeChunks = append(freeChunks, p)
|
||||
data = data[chunkSize:]
|
||||
}
|
||||
}
|
||||
n := len(freeChunks) - 1
|
||||
p := freeChunks[n]
|
||||
freeChunks[n] = nil
|
||||
freeChunks = freeChunks[:n]
|
||||
freeChunksLock.Unlock()
|
||||
return p[:]
|
||||
}
|
||||
|
||||
func putChunk(chunk []byte) {
|
||||
if chunk == nil {
|
||||
return
|
||||
}
|
||||
chunk = chunk[:chunkSize]
|
||||
p := (*[chunkSize]byte)(unsafe.Pointer(&chunk[0]))
|
||||
|
||||
freeChunksLock.Lock()
|
||||
freeChunks = append(freeChunks, p)
|
||||
freeChunksLock.Unlock()
|
||||
}
|
10
vendor/github.com/allegro/bigcache/.gitignore
generated
vendored
@ -1,10 +0,0 @@
|
||||
.idea
|
||||
.DS_Store
|
||||
/server/server.exe
|
||||
/server/server
|
||||
/server/server_dar*
|
||||
/server/server_fre*
|
||||
/server/server_win*
|
||||
/server/server_net*
|
||||
/server/server_ope*
|
||||
CHANGELOG.md
|
31
vendor/github.com/allegro/bigcache/.travis.yml
generated
vendored
@ -1,31 +0,0 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
|
||||
before_install:
|
||||
- go get github.com/modocache/gover
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get golang.org/x/tools/cmd/goimports
|
||||
- go get golang.org/x/lint/golint
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get github.com/gordonklaus/ineffassign
|
||||
|
||||
script:
|
||||
- gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false)
|
||||
- diff <(echo -n) <(gofmt -s -d .)
|
||||
- golint ./... # This won't break the build, just show warnings
|
||||
- ineffassign .
|
||||
- go vet ./...
|
||||
- go test -race -count=1 -coverprofile=queue.coverprofile ./queue
|
||||
- go test -race -count=1 -coverprofile=server.coverprofile ./server
|
||||
- go test -race -count=1 -coverprofile=main.coverprofile
|
||||
- $HOME/gopath/bin/gover
|
||||
- $HOME/gopath/bin/goveralls -coverprofile=gover.coverprofile -service travis-ci
|
150
vendor/github.com/allegro/bigcache/README.md
generated
vendored
@ -1,150 +0,0 @@
|
||||
# BigCache [](https://travis-ci.org/allegro/bigcache) [](https://coveralls.io/github/allegro/bigcache?branch=master) [](https://godoc.org/github.com/allegro/bigcache) [](https://goreportcard.com/report/github.com/allegro/bigcache)
|
||||
|
||||
Fast, concurrent, evicting in-memory cache written to keep a big number of entries without impact on performance.
BigCache keeps entries on the heap but omits GC for them. To achieve that, operations are performed on byte arrays,
therefore entry (de)serialization in front of the cache will be needed in most use cases.
|
||||
|
||||
## Usage
|
||||
|
||||
### Simple initialization
|
||||
|
||||
```go
|
||||
import "github.com/allegro/bigcache"
|
||||
|
||||
cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
|
||||
|
||||
cache.Set("my-unique-key", []byte("value"))
|
||||
|
||||
entry, _ := cache.Get("my-unique-key")
|
||||
fmt.Println(string(entry))
|
||||
```
|
||||
|
||||
### Custom initialization
|
||||
|
||||
When cache load can be predicted in advance then it is better to use custom initialization because additional memory
|
||||
allocation can be avoided in that way.
|
||||
|
||||
```go
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/allegro/bigcache"
|
||||
)
|
||||
|
||||
config := bigcache.Config {
|
||||
// number of shards (must be a power of 2)
|
||||
Shards: 1024,
|
||||
// time after which entry can be evicted
|
||||
LifeWindow: 10 * time.Minute,
|
||||
// rps * lifeWindow, used only in initial memory allocation
|
||||
MaxEntriesInWindow: 1000 * 10 * 60,
|
||||
// max entry size in bytes, used only in initial memory allocation
|
||||
MaxEntrySize: 500,
|
||||
// prints information about additional memory allocation
|
||||
Verbose: true,
|
||||
// cache will not allocate more memory than this limit, value in MB
|
||||
// if value is reached then the oldest entries can be overridden for the new ones
|
||||
// 0 value means no size limit
|
||||
HardMaxCacheSize: 8192,
|
||||
// callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||
// for the new entry, or because delete was called. A bitmask representing the reason will be returned.
|
||||
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||
OnRemove: nil,
|
||||
// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||
// for the new entry, or because delete was called. A constant representing the reason will be passed through.
|
||||
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||
// Ignored if OnRemove is specified.
|
||||
OnRemoveWithReason: nil,
|
||||
}
|
||||
|
||||
cache, initErr := bigcache.NewBigCache(config)
|
||||
if initErr != nil {
|
||||
log.Fatal(initErr)
|
||||
}
|
||||
|
||||
cache.Set("my-unique-key", []byte("value"))
|
||||
|
||||
if entry, err := cache.Get("my-unique-key"); err == nil {
|
||||
fmt.Println(string(entry))
|
||||
}
|
||||
```
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map.
|
||||
Benchmark tests were made using an i7-6700K with 32GB of RAM on Windows 10.
|
||||
|
||||
### Writes and reads
|
||||
|
||||
```bash
|
||||
cd caches_bench; go test -bench=. -benchtime=10s ./... -timeout 30m
|
||||
|
||||
BenchmarkMapSet-8 3000000 569 ns/op 202 B/op 3 allocs/op
|
||||
BenchmarkConcurrentMapSet-8 1000000 1592 ns/op 347 B/op 8 allocs/op
|
||||
BenchmarkFreeCacheSet-8 3000000 775 ns/op 355 B/op 2 allocs/op
|
||||
BenchmarkBigCacheSet-8 3000000 640 ns/op 303 B/op 2 allocs/op
|
||||
BenchmarkMapGet-8 5000000 407 ns/op 24 B/op 1 allocs/op
|
||||
BenchmarkConcurrentMapGet-8 3000000 558 ns/op 24 B/op 2 allocs/op
|
||||
BenchmarkFreeCacheGet-8 2000000 682 ns/op 136 B/op 2 allocs/op
|
||||
BenchmarkBigCacheGet-8 3000000 512 ns/op 152 B/op 4 allocs/op
|
||||
BenchmarkBigCacheSetParallel-8 10000000 225 ns/op 313 B/op 3 allocs/op
|
||||
BenchmarkFreeCacheSetParallel-8 10000000 218 ns/op 341 B/op 3 allocs/op
|
||||
BenchmarkConcurrentMapSetParallel-8 5000000 318 ns/op 200 B/op 6 allocs/op
|
||||
BenchmarkBigCacheGetParallel-8 20000000 178 ns/op 152 B/op 4 allocs/op
|
||||
BenchmarkFreeCacheGetParallel-8 20000000 295 ns/op 136 B/op 3 allocs/op
|
||||
BenchmarkConcurrentMapGetParallel-8 10000000 237 ns/op 24 B/op 2 allocs/op
|
||||
```
|
||||
|
||||
Writes and reads in bigcache are faster than in freecache.
|
||||
Writes to map are the slowest.
|
||||
|
||||
### GC pause time
|
||||
|
||||
```bash
|
||||
cd caches_bench; go run caches_gc_overhead_comparison.go
|
||||
|
||||
Number of entries: 20000000
|
||||
GC pause for bigcache: 5.8658ms
|
||||
GC pause for freecache: 32.4341ms
|
||||
GC pause for map: 52.9661ms
|
||||
```
|
||||
|
||||
The test shows how long the GC pauses are for caches filled with 20 million entries.
Bigcache and freecache have very similar GC pause times.
It is clear that both reduce GC overhead, in contrast to the map,
whose GC pause took more than 10 seconds.
|
||||
|
||||
## How it works
|
||||
|
||||
BigCache relies on an optimization introduced in Go 1.5 ([issue-9477](https://github.com/golang/go/issues/9477)).
This optimization states that if a map has no pointers in its keys and values, the GC will skip scanning its content.
Therefore BigCache uses `map[uint64]uint32`, where keys are hashed and values are offsets of entries.

Entries are kept in a byte array, again keeping them out of the GC's reach.
The byte array can grow to gigabytes without impacting performance,
because the GC only sees a single pointer to it.
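
For illustration only (this is not BigCache's actual implementation), the pointer-free layout described above boils down to a map with scalar keys and values plus one big byte slice:

```go
// Sketch of a GC-friendly index: no pointers in the map, one backing slice.
package main

import "fmt"

type arenaCache struct {
	index   map[uint64]uint32 // hashed key -> offset into entries
	entries []byte            // all values appended back to back
}

func (c *arenaCache) add(hash uint64, value []byte) {
	c.index[hash] = uint32(len(c.entries))
	// A real implementation would also record the value length and original key.
	c.entries = append(c.entries, value...)
}

func main() {
	c := &arenaCache{index: make(map[uint64]uint32)}
	c.add(42, []byte("payload"))
	fmt.Println(len(c.entries), c.index[42])
}
```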
|
||||
|
||||
## Bigcache vs Freecache
|
||||
|
||||
Both caches provide the same core features but they reduce GC overhead in different ways.
|
||||
Bigcache relies on `map[uint64]uint32`, whereas freecache implements its own mapping built on
slices to reduce the number of pointers.
|
||||
|
||||
Results from benchmark tests are presented above.
|
||||
One of the advantages of bigcache over freecache is that you don't need to know
the size of the cache in advance, because when bigcache is full,
it can allocate additional memory for new entries instead of
overwriting existing ones as freecache does currently.
However, a hard maximum size can also be set in bigcache; see [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config).
|
||||
|
||||
## HTTP Server
|
||||
|
||||
This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package.
|
||||
|
||||
## More
|
||||
|
||||
Bigcache genesis is described in allegro.tech blog post: [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html)
|
||||
|
||||
## License
|
||||
|
||||
BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE))
|
202
vendor/github.com/allegro/bigcache/bigcache.go
generated
vendored
@ -1,202 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
minimumEntriesInShard = 10 // Minimum number of entries in single shard
|
||||
)
|
||||
|
||||
// BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance.
|
||||
// It keeps entries on heap but omits GC for them. To achieve that, operations take place on byte arrays,
|
||||
// therefore entries (de)serialization in front of the cache will be needed in most use cases.
|
||||
type BigCache struct {
|
||||
shards []*cacheShard
|
||||
lifeWindow uint64
|
||||
clock clock
|
||||
hash Hasher
|
||||
config Config
|
||||
shardMask uint64
|
||||
maxShardSize uint32
|
||||
close chan struct{}
|
||||
}
|
||||
|
||||
// RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback.
|
||||
type RemoveReason uint32
|
||||
|
||||
const (
|
||||
// Expired means the key is past its LifeWindow.
|
||||
Expired RemoveReason = iota
|
||||
// NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the
|
||||
// entry exceeded the maximum shard size.
|
||||
NoSpace
|
||||
// Deleted means Delete was called and this key was removed as a result.
|
||||
Deleted
|
||||
)
|
||||
|
||||
// NewBigCache initialize new instance of BigCache
|
||||
func NewBigCache(config Config) (*BigCache, error) {
|
||||
return newBigCache(config, &systemClock{})
|
||||
}
|
||||
|
||||
func newBigCache(config Config, clock clock) (*BigCache, error) {
|
||||
|
||||
if !isPowerOfTwo(config.Shards) {
|
||||
return nil, fmt.Errorf("Shards number must be power of two")
|
||||
}
|
||||
|
||||
if config.Hasher == nil {
|
||||
config.Hasher = newDefaultHasher()
|
||||
}
|
||||
|
||||
cache := &BigCache{
|
||||
shards: make([]*cacheShard, config.Shards),
|
||||
lifeWindow: uint64(config.LifeWindow.Seconds()),
|
||||
clock: clock,
|
||||
hash: config.Hasher,
|
||||
config: config,
|
||||
shardMask: uint64(config.Shards - 1),
|
||||
maxShardSize: uint32(config.maximumShardSize()),
|
||||
close: make(chan struct{}),
|
||||
}
|
||||
|
||||
var onRemove func(wrappedEntry []byte, reason RemoveReason)
|
||||
if config.OnRemove != nil {
|
||||
onRemove = cache.providedOnRemove
|
||||
} else if config.OnRemoveWithReason != nil {
|
||||
onRemove = cache.providedOnRemoveWithReason
|
||||
} else {
|
||||
onRemove = cache.notProvidedOnRemove
|
||||
}
|
||||
|
||||
for i := 0; i < config.Shards; i++ {
|
||||
cache.shards[i] = initNewShard(config, onRemove, clock)
|
||||
}
|
||||
|
||||
if config.CleanWindow > 0 {
|
||||
go func() {
|
||||
ticker := time.NewTicker(config.CleanWindow)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case t := <-ticker.C:
|
||||
cache.cleanUp(uint64(t.Unix()))
|
||||
case <-cache.close:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return cache, nil
|
||||
}
|
||||
|
||||
// Close is used to signal a shutdown of the cache when you are done with it.
|
||||
// This allows the cleaning goroutines to exit and ensures references are not
|
||||
// kept to the cache preventing GC of the entire cache.
|
||||
func (c *BigCache) Close() error {
|
||||
close(c.close)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get reads entry for the key.
|
||||
// It returns an ErrEntryNotFound when
|
||||
// no entry exists for the given key.
|
||||
func (c *BigCache) Get(key string) ([]byte, error) {
|
||||
hashedKey := c.hash.Sum64(key)
|
||||
shard := c.getShard(hashedKey)
|
||||
return shard.get(key, hashedKey)
|
||||
}
|
||||
|
||||
// Set saves entry under the key
|
||||
func (c *BigCache) Set(key string, entry []byte) error {
|
||||
hashedKey := c.hash.Sum64(key)
|
||||
shard := c.getShard(hashedKey)
|
||||
return shard.set(key, hashedKey, entry)
|
||||
}
|
||||
|
||||
// Delete removes the key
|
||||
func (c *BigCache) Delete(key string) error {
|
||||
hashedKey := c.hash.Sum64(key)
|
||||
shard := c.getShard(hashedKey)
|
||||
return shard.del(key, hashedKey)
|
||||
}
|
||||
|
||||
// Reset empties all cache shards
|
||||
func (c *BigCache) Reset() error {
|
||||
for _, shard := range c.shards {
|
||||
shard.reset(c.config)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len computes number of entries in cache
|
||||
func (c *BigCache) Len() int {
|
||||
var len int
|
||||
for _, shard := range c.shards {
|
||||
len += shard.len()
|
||||
}
|
||||
return len
|
||||
}
|
||||
|
||||
// Capacity returns amount of bytes store in the cache.
|
||||
func (c *BigCache) Capacity() int {
|
||||
var len int
|
||||
for _, shard := range c.shards {
|
||||
len += shard.capacity()
|
||||
}
|
||||
return len
|
||||
}
|
||||
|
||||
// Stats returns cache's statistics
|
||||
func (c *BigCache) Stats() Stats {
|
||||
var s Stats
|
||||
for _, shard := range c.shards {
|
||||
tmp := shard.getStats()
|
||||
s.Hits += tmp.Hits
|
||||
s.Misses += tmp.Misses
|
||||
s.DelHits += tmp.DelHits
|
||||
s.DelMisses += tmp.DelMisses
|
||||
s.Collisions += tmp.Collisions
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Iterator returns iterator function to iterate over EntryInfo's from whole cache.
|
||||
func (c *BigCache) Iterator() *EntryInfoIterator {
|
||||
return newIterator(c)
|
||||
}
|
||||
|
||||
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
|
||||
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||
if currentTimestamp-oldestTimestamp > c.lifeWindow {
|
||||
evict(Expired)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *BigCache) cleanUp(currentTimestamp uint64) {
|
||||
for _, shard := range c.shards {
|
||||
shard.cleanUp(currentTimestamp)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
|
||||
return c.shards[hashedKey&c.shardMask]
|
||||
}
|
||||
|
||||
func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) {
|
||||
c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
|
||||
}
|
||||
|
||||
func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) {
|
||||
if c.config.onRemoveFilter == 0 || (1<<uint(reason))&c.config.onRemoveFilter > 0 {
|
||||
c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) {
|
||||
}
|
14
vendor/github.com/allegro/bigcache/bytes.go
generated
vendored
@ -1,14 +0,0 @@
|
||||
// +build !appengine
|
||||
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
|
||||
return *(*string)(unsafe.Pointer(&strHeader))
|
||||
}
|
7
vendor/github.com/allegro/bigcache/bytes_appengine.go
generated
vendored
@ -1,7 +0,0 @@
|
||||
// +build appengine
|
||||
|
||||
package bigcache
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
return string(b)
|
||||
}
|
14
vendor/github.com/allegro/bigcache/clock.go
generated
vendored
@ -1,14 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import "time"
|
||||
|
||||
type clock interface {
|
||||
epoch() int64
|
||||
}
|
||||
|
||||
type systemClock struct {
|
||||
}
|
||||
|
||||
func (c systemClock) epoch() int64 {
|
||||
return time.Now().Unix()
|
||||
}
|
86
vendor/github.com/allegro/bigcache/config.go
generated
vendored
@ -1,86 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import "time"
|
||||
|
||||
// Config for BigCache
|
||||
type Config struct {
|
||||
// Number of cache shards, value must be a power of two
|
||||
Shards int
|
||||
// Time after which entry can be evicted
|
||||
LifeWindow time.Duration
|
||||
// Interval between removing expired entries (clean up).
|
||||
// If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution.
|
||||
CleanWindow time.Duration
|
||||
// Max number of entries in life window. Used only to calculate initial size for cache shards.
|
||||
// When proper value is set then additional memory allocation does not occur.
|
||||
MaxEntriesInWindow int
|
||||
// Max size of entry in bytes. Used only to calculate initial size for cache shards.
|
||||
MaxEntrySize int
|
||||
// Verbose mode prints information about new memory allocation
|
||||
Verbose bool
|
||||
// Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used.
|
||||
Hasher Hasher
|
||||
// HardMaxCacheSize is a limit for cache size in MB. Cache will not allocate more memory than this limit.
|
||||
// It can protect application from consuming all available memory on machine, therefore from running OOM Killer.
|
||||
// Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then
|
||||
// the oldest entries are overridden for the new ones.
|
||||
HardMaxCacheSize int
|
||||
// OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||
// for the new entry, or because delete was called.
|
||||
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||
OnRemove func(key string, entry []byte)
|
||||
// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||
// for the new entry, or because delete was called. A constant representing the reason will be passed through.
|
||||
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||
// Ignored if OnRemove is specified.
|
||||
OnRemoveWithReason func(key string, entry []byte, reason RemoveReason)
|
||||
|
||||
onRemoveFilter int
|
||||
|
||||
// Logger is a logging interface and used in combination with `Verbose`
|
||||
// Defaults to `DefaultLogger()`
|
||||
Logger Logger
|
||||
}
|
||||
|
||||
// DefaultConfig initializes config with default values.
|
||||
// When load for BigCache can be predicted in advance then it is better to use custom config.
|
||||
func DefaultConfig(eviction time.Duration) Config {
|
||||
return Config{
|
||||
Shards: 1024,
|
||||
LifeWindow: eviction,
|
||||
CleanWindow: 0,
|
||||
MaxEntriesInWindow: 1000 * 10 * 60,
|
||||
MaxEntrySize: 500,
|
||||
Verbose: true,
|
||||
Hasher: newDefaultHasher(),
|
||||
HardMaxCacheSize: 0,
|
||||
Logger: DefaultLogger(),
|
||||
}
|
||||
}
|
||||
|
||||
// initialShardSize computes initial shard size
|
||||
func (c Config) initialShardSize() int {
|
||||
return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard)
|
||||
}
|
||||
|
||||
// maximumShardSize computes maximum shard size
|
||||
func (c Config) maximumShardSize() int {
|
||||
maxShardSize := 0
|
||||
|
||||
if c.HardMaxCacheSize > 0 {
|
||||
maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards
|
||||
}
|
||||
|
||||
return maxShardSize
|
||||
}
|
||||
|
||||
// OnRemoveFilterSet sets which remove reasons will trigger a call to OnRemoveWithReason.
|
||||
// Filtering out reasons prevents bigcache from unwrapping them, which saves cpu.
|
||||
func (c Config) OnRemoveFilterSet(reasons ...RemoveReason) Config {
|
||||
c.onRemoveFilter = 0
|
||||
for i := range reasons {
|
||||
c.onRemoveFilter |= 1 << uint(reasons[i])
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
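For context on how the Config fields above fit together, here is a minimal usage sketch. DefaultConfig and OnRemoveFilterSet are defined above; the constructor and accessors (NewBigCache, Set, Get) and the exported Expired reason are assumed from the package's public API, which is not shown in this diff.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/allegro/bigcache"
)

func main() {
	// Start from the defaults shown above and tune a few fields.
	config := bigcache.DefaultConfig(10 * time.Minute)
	config.CleanWindow = 5 * time.Second // evict expired entries every 5s (bigcache has 1s resolution)
	config.HardMaxCacheSize = 64         // cap total cache memory at roughly 64 MB
	config.OnRemoveWithReason = func(key string, entry []byte, reason bigcache.RemoveReason) {
		fmt.Printf("removed %q (reason %d)\n", key, reason)
	}
	// Only fire the callback for expired entries.
	config = config.OnRemoveFilterSet(bigcache.Expired)

	cache, err := bigcache.NewBigCache(config)
	if err != nil {
		log.Fatal(err)
	}
	_ = cache.Set("my-key", []byte("my-value"))
	if v, err := cache.Get("my-key"); err == nil {
		fmt.Println(string(v))
	}
}
```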
62
vendor/github.com/allegro/bigcache/encoding.go
generated
vendored
@ -1,62 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
const (
|
||||
timestampSizeInBytes = 8 // Number of bytes used for timestamp
|
||||
hashSizeInBytes = 8 // Number of bytes used for hash
|
||||
keySizeInBytes = 2 // Number of bytes used for size of entry key
|
||||
headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
|
||||
)
|
||||
|
||||
func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
|
||||
keyLength := len(key)
|
||||
blobLength := len(entry) + headersSizeInBytes + keyLength
|
||||
|
||||
if blobLength > len(*buffer) {
|
||||
*buffer = make([]byte, blobLength)
|
||||
}
|
||||
blob := *buffer
|
||||
|
||||
binary.LittleEndian.PutUint64(blob, timestamp)
|
||||
binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
|
||||
binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
|
||||
copy(blob[headersSizeInBytes:], key)
|
||||
copy(blob[headersSizeInBytes+keyLength:], entry)
|
||||
|
||||
return blob[:blobLength]
|
||||
}
|
||||
|
||||
func readEntry(data []byte) []byte {
|
||||
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
|
||||
|
||||
// copy on read
|
||||
dst := make([]byte, len(data)-int(headersSizeInBytes+length))
|
||||
copy(dst, data[headersSizeInBytes+length:])
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
func readTimestampFromEntry(data []byte) uint64 {
|
||||
return binary.LittleEndian.Uint64(data)
|
||||
}
|
||||
|
||||
func readKeyFromEntry(data []byte) string {
|
||||
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
|
||||
|
||||
// copy on read
|
||||
dst := make([]byte, length)
|
||||
copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])
|
||||
|
||||
return bytesToString(dst)
|
||||
}
|
||||
|
||||
func readHashFromEntry(data []byte) uint64 {
|
||||
return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
|
||||
}
|
||||
|
||||
func resetKeyFromEntry(data []byte) {
|
||||
binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
|
||||
}
|
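The wrap/read helpers above define a simple binary layout: an 8-byte little-endian timestamp, an 8-byte hash, a 2-byte key length (18 header bytes in total), followed by the key and the payload. A standalone sketch of that layout, with hypothetical values, not part of the package:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Mirrors the layout used by wrapEntry above:
// 8-byte timestamp | 8-byte hash | 2-byte key length | key bytes | entry bytes
func main() {
	key, entry := "user:42", []byte("payload")
	blob := make([]byte, 8+8+2+len(key)+len(entry))

	binary.LittleEndian.PutUint64(blob[0:], 1600000000)         // timestamp
	binary.LittleEndian.PutUint64(blob[8:], 0xdeadbeef)         // key hash
	binary.LittleEndian.PutUint16(blob[16:], uint16(len(key)))  // key length
	copy(blob[18:], key)
	copy(blob[18+len(key):], entry)

	// readEntry's logic: skip the 18-byte header plus the key.
	keyLen := int(binary.LittleEndian.Uint16(blob[16:]))
	fmt.Printf("key=%s value=%s\n", blob[18:18+keyLen], blob[18+keyLen:])
}
```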
6
vendor/github.com/allegro/bigcache/entry_not_found_error.go
generated
vendored
@ -1,6 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import "errors"
|
||||
|
||||
// ErrEntryNotFound is the error returned when no entry is found for the provided key
|
||||
var ErrEntryNotFound = errors.New("Entry not found")
|
28
vendor/github.com/allegro/bigcache/fnv.go
generated
vendored
@ -1,28 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations.
|
||||
// Its Sum64 method will lay the value out in big-endian byte order.
|
||||
// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function
|
||||
func newDefaultHasher() Hasher {
|
||||
return fnv64a{}
|
||||
}
|
||||
|
||||
type fnv64a struct{}
|
||||
|
||||
const (
|
||||
// offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
|
||||
offset64 = 14695981039346656037
|
||||
// prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
|
||||
prime64 = 1099511628211
|
||||
)
|
||||
|
||||
// Sum64 gets the string and returns its uint64 hash value.
|
||||
func (f fnv64a) Sum64(key string) uint64 {
|
||||
var hash uint64 = offset64
|
||||
for i := 0; i < len(key); i++ {
|
||||
hash ^= uint64(key[i])
|
||||
hash *= prime64
|
||||
}
|
||||
|
||||
return hash
|
||||
}
|
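As a sanity check on the constants above, the same FNV-1a computation can be reproduced with the standard library's hash/fnv and should agree; a minimal sketch:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// Standalone FNV-1a using the same offset64/prime64 constants as above.
func fnv1a64(key string) uint64 {
	var h uint64 = 14695981039346656037
	for i := 0; i < len(key); i++ {
		h ^= uint64(key[i])
		h *= 1099511628211
	}
	return h
}

func main() {
	stdlib := fnv.New64a()
	stdlib.Write([]byte("hello"))
	fmt.Println(fnv1a64("hello") == stdlib.Sum64()) // true: same algorithm as the stdlib
}
```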
8
vendor/github.com/allegro/bigcache/hash.go
generated
vendored
@ -1,8 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
// Hasher is responsible for generating an unsigned 64-bit hash of the provided string. Hasher should minimize collisions
|
||||
// (the same hash for different strings), and since performance is also important, fast functions are preferable (e.g.
|
||||
// the FarmHash family).
|
||||
type Hasher interface {
|
||||
Sum64(string) uint64
|
||||
}
|
122
vendor/github.com/allegro/bigcache/iterator.go
generated
vendored
@ -1,122 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import "sync"
|
||||
|
||||
type iteratorError string
|
||||
|
||||
func (e iteratorError) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
// ErrInvalidIteratorState is reported when iterator is in invalid state
|
||||
const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position")
|
||||
|
||||
// ErrCannotRetrieveEntry is reported when entry cannot be retrieved from underlying
|
||||
const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache")
|
||||
|
||||
var emptyEntryInfo = EntryInfo{}
|
||||
|
||||
// EntryInfo holds information about an entry in the cache
|
||||
type EntryInfo struct {
|
||||
timestamp uint64
|
||||
hash uint64
|
||||
key string
|
||||
value []byte
|
||||
}
|
||||
|
||||
// Key returns entry's underlying key
|
||||
func (e EntryInfo) Key() string {
|
||||
return e.key
|
||||
}
|
||||
|
||||
// Hash returns entry's hash value
|
||||
func (e EntryInfo) Hash() uint64 {
|
||||
return e.hash
|
||||
}
|
||||
|
||||
// Timestamp returns entry's timestamp (time of insertion)
|
||||
func (e EntryInfo) Timestamp() uint64 {
|
||||
return e.timestamp
|
||||
}
|
||||
|
||||
// Value returns entry's underlying value
|
||||
func (e EntryInfo) Value() []byte {
|
||||
return e.value
|
||||
}
|
||||
|
||||
// EntryInfoIterator allows iterating over entries in the cache
|
||||
type EntryInfoIterator struct {
|
||||
mutex sync.Mutex
|
||||
cache *BigCache
|
||||
currentShard int
|
||||
currentIndex int
|
||||
elements []uint32
|
||||
elementsCount int
|
||||
valid bool
|
||||
}
|
||||
|
||||
// SetNext moves to next element and returns true if it exists.
|
||||
func (it *EntryInfoIterator) SetNext() bool {
|
||||
it.mutex.Lock()
|
||||
|
||||
it.valid = false
|
||||
it.currentIndex++
|
||||
|
||||
if it.elementsCount > it.currentIndex {
|
||||
it.valid = true
|
||||
it.mutex.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
for i := it.currentShard + 1; i < it.cache.config.Shards; i++ {
|
||||
it.elements, it.elementsCount = it.cache.shards[i].copyKeys()
|
||||
|
||||
// Non empty shard - stick with it
|
||||
if it.elementsCount > 0 {
|
||||
it.currentIndex = 0
|
||||
it.currentShard = i
|
||||
it.valid = true
|
||||
it.mutex.Unlock()
|
||||
return true
|
||||
}
|
||||
}
|
||||
it.mutex.Unlock()
|
||||
return false
|
||||
}
|
||||
|
||||
func newIterator(cache *BigCache) *EntryInfoIterator {
|
||||
elements, count := cache.shards[0].copyKeys()
|
||||
|
||||
return &EntryInfoIterator{
|
||||
cache: cache,
|
||||
currentShard: 0,
|
||||
currentIndex: -1,
|
||||
elements: elements,
|
||||
elementsCount: count,
|
||||
}
|
||||
}
|
||||
|
||||
// Value returns current value from the iterator
|
||||
func (it *EntryInfoIterator) Value() (EntryInfo, error) {
|
||||
it.mutex.Lock()
|
||||
|
||||
if !it.valid {
|
||||
it.mutex.Unlock()
|
||||
return emptyEntryInfo, ErrInvalidIteratorState
|
||||
}
|
||||
|
||||
entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex]))
|
||||
|
||||
if err != nil {
|
||||
it.mutex.Unlock()
|
||||
return emptyEntryInfo, ErrCannotRetrieveEntry
|
||||
}
|
||||
it.mutex.Unlock()
|
||||
|
||||
return EntryInfo{
|
||||
timestamp: readTimestampFromEntry(entry),
|
||||
hash: readHashFromEntry(entry),
|
||||
key: readKeyFromEntry(entry),
|
||||
value: readEntry(entry),
|
||||
}, nil
|
||||
}
|
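A short usage sketch of the iterator above. It assumes the cache exposes the iterator via a BigCache.Iterator() method, as in upstream bigcache; that method is not part of this diff.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/allegro/bigcache"
)

func main() {
	cache, err := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
	if err != nil {
		log.Fatal(err)
	}
	_ = cache.Set("a", []byte("1"))
	_ = cache.Set("b", []byte("2"))

	// SetNext advances the iterator; Value returns the EntryInfo snapshot defined above.
	it := cache.Iterator()
	for it.SetNext() {
		info, err := it.Value()
		if err != nil {
			continue
		}
		fmt.Printf("%s=%s (hash %x)\n", info.Key(), info.Value(), info.Hash())
	}
}
```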
30
vendor/github.com/allegro/bigcache/logger.go
generated
vendored
@ -1,30 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Logger is invoked when `Config.Verbose=true`
|
||||
type Logger interface {
|
||||
Printf(format string, v ...interface{})
|
||||
}
|
||||
|
||||
// this is a safeguard, breaking on compile time in case
|
||||
// `log.Logger` does not adhere to our `Logger` interface.
|
||||
// see https://golang.org/doc/faq#guarantee_satisfies_interface
|
||||
var _ Logger = &log.Logger{}
|
||||
|
||||
// DefaultLogger returns a `Logger` implementation
|
||||
// backed by stdlib's log
|
||||
func DefaultLogger() *log.Logger {
|
||||
return log.New(os.Stdout, "", log.LstdFlags)
|
||||
}
|
||||
|
||||
func newLogger(custom Logger) Logger {
|
||||
if custom != nil {
|
||||
return custom
|
||||
}
|
||||
|
||||
return DefaultLogger()
|
||||
}
|
210
vendor/github.com/allegro/bigcache/queue/bytes_queue.go
generated
vendored
@ -1,210 +0,0 @@
|
||||
package queue
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// Number of bytes used to keep information about entry size
|
||||
headerEntrySize = 4
|
||||
// Bytes before left margin are not used. Zero index means element does not exist in queue, useful while reading slice from index
|
||||
leftMarginIndex = 1
|
||||
// Minimum empty blob size in bytes. Empty blob fills space between tail and head in additional memory allocation.
|
||||
// It keeps entries indexes unchanged
|
||||
minimumEmptyBlobSize = 32 + headerEntrySize
|
||||
)
|
||||
|
||||
// BytesQueue is a non-thread-safe FIFO queue based on a byte array.
|
||||
// For every push operation the index of the entry is returned. It can be used to read the entry later
|
||||
type BytesQueue struct {
|
||||
array []byte
|
||||
capacity int
|
||||
maxCapacity int
|
||||
head int
|
||||
tail int
|
||||
count int
|
||||
rightMargin int
|
||||
headerBuffer []byte
|
||||
verbose bool
|
||||
initialCapacity int
|
||||
}
|
||||
|
||||
type queueError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
// NewBytesQueue initializes a new bytes queue.
|
||||
// Initial capacity is used for the byte array allocation.
|
||||
// When the verbose flag is set, information about memory allocations is printed.
|
||||
func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue {
|
||||
return &BytesQueue{
|
||||
array: make([]byte, initialCapacity),
|
||||
capacity: initialCapacity,
|
||||
maxCapacity: maxCapacity,
|
||||
headerBuffer: make([]byte, headerEntrySize),
|
||||
tail: leftMarginIndex,
|
||||
head: leftMarginIndex,
|
||||
rightMargin: leftMarginIndex,
|
||||
verbose: verbose,
|
||||
initialCapacity: initialCapacity,
|
||||
}
|
||||
}
|
||||
|
||||
// Reset removes all entries from queue
|
||||
func (q *BytesQueue) Reset() {
|
||||
// Just reset indexes
|
||||
q.tail = leftMarginIndex
|
||||
q.head = leftMarginIndex
|
||||
q.rightMargin = leftMarginIndex
|
||||
q.count = 0
|
||||
}
|
||||
|
||||
// Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed.
|
||||
// Returns index for pushed data or error if maximum size queue limit is reached.
|
||||
func (q *BytesQueue) Push(data []byte) (int, error) {
|
||||
dataLen := len(data)
|
||||
|
||||
if q.availableSpaceAfterTail() < dataLen+headerEntrySize {
|
||||
if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize {
|
||||
q.tail = leftMarginIndex
|
||||
} else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 {
|
||||
return -1, &queueError{"Full queue. Maximum size limit reached."}
|
||||
} else {
|
||||
q.allocateAdditionalMemory(dataLen + headerEntrySize)
|
||||
}
|
||||
}
|
||||
|
||||
index := q.tail
|
||||
|
||||
q.push(data, dataLen)
|
||||
|
||||
return index, nil
|
||||
}
|
||||
|
||||
func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
|
||||
start := time.Now()
|
||||
if q.capacity < minimum {
|
||||
q.capacity += minimum
|
||||
}
|
||||
q.capacity = q.capacity * 2
|
||||
if q.capacity > q.maxCapacity && q.maxCapacity > 0 {
|
||||
q.capacity = q.maxCapacity
|
||||
}
|
||||
|
||||
oldArray := q.array
|
||||
q.array = make([]byte, q.capacity)
|
||||
|
||||
if leftMarginIndex != q.rightMargin {
|
||||
copy(q.array, oldArray[:q.rightMargin])
|
||||
|
||||
if q.tail < q.head {
|
||||
emptyBlobLen := q.head - q.tail - headerEntrySize
|
||||
q.push(make([]byte, emptyBlobLen), emptyBlobLen)
|
||||
q.head = leftMarginIndex
|
||||
q.tail = q.rightMargin
|
||||
}
|
||||
}
|
||||
|
||||
if q.verbose {
|
||||
log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *BytesQueue) push(data []byte, len int) {
|
||||
binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len))
|
||||
q.copy(q.headerBuffer, headerEntrySize)
|
||||
|
||||
q.copy(data, len)
|
||||
|
||||
if q.tail > q.head {
|
||||
q.rightMargin = q.tail
|
||||
}
|
||||
|
||||
q.count++
|
||||
}
|
||||
|
||||
func (q *BytesQueue) copy(data []byte, len int) {
|
||||
q.tail += copy(q.array[q.tail:], data[:len])
|
||||
}
|
||||
|
||||
// Pop reads the oldest entry from queue and moves head pointer to the next one
|
||||
func (q *BytesQueue) Pop() ([]byte, error) {
|
||||
data, size, err := q.peek(q.head)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
q.head += headerEntrySize + size
|
||||
q.count--
|
||||
|
||||
if q.head == q.rightMargin {
|
||||
q.head = leftMarginIndex
|
||||
if q.tail == q.rightMargin {
|
||||
q.tail = leftMarginIndex
|
||||
}
|
||||
q.rightMargin = q.tail
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Peek reads the oldest entry from list without moving head pointer
|
||||
func (q *BytesQueue) Peek() ([]byte, error) {
|
||||
data, _, err := q.peek(q.head)
|
||||
return data, err
|
||||
}
|
||||
|
||||
// Get reads entry from index
|
||||
func (q *BytesQueue) Get(index int) ([]byte, error) {
|
||||
data, _, err := q.peek(index)
|
||||
return data, err
|
||||
}
|
||||
|
||||
// Capacity returns number of allocated bytes for queue
|
||||
func (q *BytesQueue) Capacity() int {
|
||||
return q.capacity
|
||||
}
|
||||
|
||||
// Len returns number of entries kept in queue
|
||||
func (q *BytesQueue) Len() int {
|
||||
return q.count
|
||||
}
|
||||
|
||||
// Error returns error message
|
||||
func (e *queueError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (q *BytesQueue) peek(index int) ([]byte, int, error) {
|
||||
|
||||
if q.count == 0 {
|
||||
return nil, 0, &queueError{"Empty queue"}
|
||||
}
|
||||
|
||||
if index <= 0 {
|
||||
return nil, 0, &queueError{"Index must be grater than zero. Invalid index."}
|
||||
}
|
||||
|
||||
if index+headerEntrySize >= len(q.array) {
|
||||
return nil, 0, &queueError{"Index out of range"}
|
||||
}
|
||||
|
||||
blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
|
||||
return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil
|
||||
}
|
||||
|
||||
func (q *BytesQueue) availableSpaceAfterTail() int {
|
||||
if q.tail >= q.head {
|
||||
return q.capacity - q.tail
|
||||
}
|
||||
return q.head - q.tail - minimumEmptyBlobSize
|
||||
}
|
||||
|
||||
func (q *BytesQueue) availableSpaceBeforeHead() int {
|
||||
if q.tail >= q.head {
|
||||
return q.head - leftMarginIndex - minimumEmptyBlobSize
|
||||
}
|
||||
return q.head - q.tail - minimumEmptyBlobSize
|
||||
}
|
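The queue subpackage is exported, so its behaviour can be shown directly. A minimal sketch using only the constructor and accessors defined above (the capacities are arbitrary example values):

```go
package main

import (
	"fmt"

	"github.com/allegro/bigcache/queue"
)

func main() {
	// 1 KiB initial capacity, 4 KiB hard cap, no verbose logging.
	q := queue.NewBytesQueue(1024, 4096, false)

	idx, err := q.Push([]byte("first")) // Push returns the entry's index
	if err != nil {
		panic(err)
	}
	q.Push([]byte("second"))

	byIndex, _ := q.Get(idx) // random access via the index returned by Push
	oldest, _ := q.Pop()     // FIFO removal from the head
	fmt.Println(string(byIndex), string(oldest), q.Len()) // first first 1
}
```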
239
vendor/github.com/allegro/bigcache/shard.go
generated
vendored
@ -1,239 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/allegro/bigcache/queue"
|
||||
)
|
||||
|
||||
type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason)
|
||||
|
||||
type cacheShard struct {
|
||||
hashmap map[uint64]uint32
|
||||
entries queue.BytesQueue
|
||||
lock sync.RWMutex
|
||||
entryBuffer []byte
|
||||
onRemove onRemoveCallback
|
||||
|
||||
isVerbose bool
|
||||
logger Logger
|
||||
clock clock
|
||||
lifeWindow uint64
|
||||
|
||||
stats Stats
|
||||
}
|
||||
|
||||
func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
|
||||
s.lock.RLock()
|
||||
itemIndex := s.hashmap[hashedKey]
|
||||
|
||||
if itemIndex == 0 {
|
||||
s.lock.RUnlock()
|
||||
s.miss()
|
||||
return nil, ErrEntryNotFound
|
||||
}
|
||||
|
||||
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||
if err != nil {
|
||||
s.lock.RUnlock()
|
||||
s.miss()
|
||||
return nil, err
|
||||
}
|
||||
if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
|
||||
if s.isVerbose {
|
||||
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
|
||||
}
|
||||
s.lock.RUnlock()
|
||||
s.collision()
|
||||
return nil, ErrEntryNotFound
|
||||
}
|
||||
s.lock.RUnlock()
|
||||
s.hit()
|
||||
return readEntry(wrappedEntry), nil
|
||||
}
|
||||
|
||||
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
|
||||
currentTimestamp := uint64(s.clock.epoch())
|
||||
|
||||
s.lock.Lock()
|
||||
|
||||
if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
|
||||
if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
|
||||
resetKeyFromEntry(previousEntry)
|
||||
}
|
||||
}
|
||||
|
||||
if oldestEntry, err := s.entries.Peek(); err == nil {
|
||||
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
|
||||
}
|
||||
|
||||
w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
|
||||
|
||||
for {
|
||||
if index, err := s.entries.Push(w); err == nil {
|
||||
s.hashmap[hashedKey] = uint32(index)
|
||||
s.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
if s.removeOldestEntry(NoSpace) != nil {
|
||||
s.lock.Unlock()
|
||||
return fmt.Errorf("entry is bigger than max shard size")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *cacheShard) del(key string, hashedKey uint64) error {
|
||||
s.lock.RLock()
|
||||
itemIndex := s.hashmap[hashedKey]
|
||||
|
||||
if itemIndex == 0 {
|
||||
s.lock.RUnlock()
|
||||
s.delmiss()
|
||||
return ErrEntryNotFound
|
||||
}
|
||||
|
||||
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||
if err != nil {
|
||||
s.lock.RUnlock()
|
||||
s.delmiss()
|
||||
return err
|
||||
}
|
||||
s.lock.RUnlock()
|
||||
|
||||
s.lock.Lock()
|
||||
{
|
||||
delete(s.hashmap, hashedKey)
|
||||
s.onRemove(wrappedEntry, Deleted)
|
||||
resetKeyFromEntry(wrappedEntry)
|
||||
}
|
||||
s.lock.Unlock()
|
||||
|
||||
s.delhit()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
|
||||
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||
if currentTimestamp-oldestTimestamp > s.lifeWindow {
|
||||
evict(Expired)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *cacheShard) cleanUp(currentTimestamp uint64) {
|
||||
s.lock.Lock()
|
||||
for {
|
||||
if oldestEntry, err := s.entries.Peek(); err != nil {
|
||||
break
|
||||
} else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
|
||||
break
|
||||
}
|
||||
}
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
func (s *cacheShard) getOldestEntry() ([]byte, error) {
|
||||
return s.entries.Peek()
|
||||
}
|
||||
|
||||
func (s *cacheShard) getEntry(index int) ([]byte, error) {
|
||||
s.lock.RLock()
|
||||
entry, err := s.entries.Get(index)
|
||||
s.lock.RUnlock()
|
||||
|
||||
return entry, err
|
||||
}
|
||||
|
||||
func (s *cacheShard) copyKeys() (keys []uint32, next int) {
|
||||
s.lock.RLock()
|
||||
keys = make([]uint32, len(s.hashmap))
|
||||
|
||||
for _, index := range s.hashmap {
|
||||
keys[next] = index
|
||||
next++
|
||||
}
|
||||
|
||||
s.lock.RUnlock()
|
||||
return keys, next
|
||||
}
|
||||
|
||||
func (s *cacheShard) removeOldestEntry(reason RemoveReason) error {
|
||||
oldest, err := s.entries.Pop()
|
||||
if err == nil {
|
||||
hash := readHashFromEntry(oldest)
|
||||
delete(s.hashmap, hash)
|
||||
s.onRemove(oldest, reason)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *cacheShard) reset(config Config) {
|
||||
s.lock.Lock()
|
||||
s.hashmap = make(map[uint64]uint32, config.initialShardSize())
|
||||
s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
|
||||
s.entries.Reset()
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
func (s *cacheShard) len() int {
|
||||
s.lock.RLock()
|
||||
res := len(s.hashmap)
|
||||
s.lock.RUnlock()
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *cacheShard) capacity() int {
|
||||
s.lock.RLock()
|
||||
res := s.entries.Capacity()
|
||||
s.lock.RUnlock()
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *cacheShard) getStats() Stats {
|
||||
var stats = Stats{
|
||||
Hits: atomic.LoadInt64(&s.stats.Hits),
|
||||
Misses: atomic.LoadInt64(&s.stats.Misses),
|
||||
DelHits: atomic.LoadInt64(&s.stats.DelHits),
|
||||
DelMisses: atomic.LoadInt64(&s.stats.DelMisses),
|
||||
Collisions: atomic.LoadInt64(&s.stats.Collisions),
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func (s *cacheShard) hit() {
|
||||
atomic.AddInt64(&s.stats.Hits, 1)
|
||||
}
|
||||
|
||||
func (s *cacheShard) miss() {
|
||||
atomic.AddInt64(&s.stats.Misses, 1)
|
||||
}
|
||||
|
||||
func (s *cacheShard) delhit() {
|
||||
atomic.AddInt64(&s.stats.DelHits, 1)
|
||||
}
|
||||
|
||||
func (s *cacheShard) delmiss() {
|
||||
atomic.AddInt64(&s.stats.DelMisses, 1)
|
||||
}
|
||||
|
||||
func (s *cacheShard) collision() {
|
||||
atomic.AddInt64(&s.stats.Collisions, 1)
|
||||
}
|
||||
|
||||
func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
|
||||
return &cacheShard{
|
||||
hashmap: make(map[uint64]uint32, config.initialShardSize()),
|
||||
entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
|
||||
entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
|
||||
onRemove: callback,
|
||||
|
||||
isVerbose: config.Verbose,
|
||||
logger: newLogger(config.Logger),
|
||||
clock: clock,
|
||||
lifeWindow: uint64(config.LifeWindow.Seconds()),
|
||||
}
|
||||
}
|
15
vendor/github.com/allegro/bigcache/stats.go
generated
vendored
@ -1,15 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
// Stats stores cache statistics
|
||||
type Stats struct {
|
||||
// Hits is a number of successfully found keys
|
||||
Hits int64 `json:"hits"`
|
||||
// Misses is a number of not found keys
|
||||
Misses int64 `json:"misses"`
|
||||
// DelHits is a number of successfully deleted keys
|
||||
DelHits int64 `json:"delete_hits"`
|
||||
// DelMisses is a number of not deleted keys
|
||||
DelMisses int64 `json:"delete_misses"`
|
||||
// Collisions is a number of happened key-collisions
|
||||
Collisions int64 `json:"collisions"`
|
||||
}
|
16
vendor/github.com/allegro/bigcache/utils.go
generated
vendored
@ -1,16 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func convertMBToBytes(value int) int {
|
||||
return value * 1024 * 1024
|
||||
}
|
||||
|
||||
func isPowerOfTwo(number int) bool {
|
||||
return (number & (number - 1)) == 0
|
||||
}
|
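A worked example of the arithmetic behind convertMBToBytes, isPowerOfTwo and maximumShardSize above, using hypothetical settings of HardMaxCacheSize = 512 MB and Shards = 1024:

```go
package main

import "fmt"

func main() {
	// convertMBToBytes(512) divided across 1024 shards gives the per-shard byte queue cap.
	hardMaxCacheSizeMB, shards := 512, 1024
	maxShardSize := hardMaxCacheSizeMB * 1024 * 1024 / shards
	fmt.Println(maxShardSize)           // 524288 bytes (512 KiB per shard)
	fmt.Println(shards&(shards-1) == 0) // true: 1024 is a power of two, as Shards requires
}
```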
25
vendor/github.com/aristanetworks/goarista/AUTHORS
generated
vendored
@ -1,25 +0,0 @@
|
||||
All contributors are required to sign a "Contributor License Agreement" at
|
||||
<TBD>
|
||||
|
||||
The following organizations and people have contributed code to this library.
|
||||
(Please keep both lists sorted alphabetically.)
|
||||
|
||||
|
||||
Arista Networks, Inc.
|
||||
|
||||
|
||||
Benoit Sigoure
|
||||
Fabrice Rabaute
|
||||
|
||||
|
||||
|
||||
The list of individual contributors for code currently in HEAD can be obtained
|
||||
at any time with the following script:
|
||||
|
||||
find . -type f \
|
||||
| while read i; do \
|
||||
git blame -t $i 2>/dev/null; \
|
||||
done \
|
||||
| sed 's/^[0-9a-f]\{8\} [^(]*(\([^)]*\) [-+0-9 ]\{14,\}).*/\1/;s/ *$//' \
|
||||
| awk '{a[$0]++; t++} END{for(n in a) print n}' \
|
||||
| sort
|
177
vendor/github.com/aristanetworks/goarista/COPYING
generated
vendored
@ -1,177 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
6
vendor/github.com/aristanetworks/goarista/monotime/issue15006.s
generated
vendored
@ -1,6 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// This file is intentionally empty.
|
||||
// It's a workaround for https://github.com/golang/go/issues/15006
|
31
vendor/github.com/aristanetworks/goarista/monotime/nanotime.go
generated
vendored
@ -1,31 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// Package monotime provides a fast monotonic clock source.
|
||||
package monotime
|
||||
|
||||
import (
|
||||
"time"
|
||||
_ "unsafe" // required to use //go:linkname
|
||||
)
|
||||
|
||||
//go:noescape
|
||||
//go:linkname nanotime runtime.nanotime
|
||||
func nanotime() int64
|
||||
|
||||
// Now returns the current time in nanoseconds from a monotonic clock.
|
||||
// The time returned is based on some arbitrary platform-specific point in the
|
||||
// past. The time returned is guaranteed to increase monotonically at a
|
||||
// constant rate, unlike time.Now() from the Go standard library, which may
|
||||
// slow down, speed up, jump forward or backward, due to NTP activity or leap
|
||||
// seconds.
|
||||
func Now() uint64 {
|
||||
return uint64(nanotime())
|
||||
}
|
||||
|
||||
// Since returns the amount of time that has elapsed since t. t should be
|
||||
// the result of a call to Now() on the same machine.
|
||||
func Since(t uint64) time.Duration {
|
||||
return time.Duration(Now() - t)
|
||||
}
|
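A minimal usage sketch of the monotime package above, timing an arbitrary sleep:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aristanetworks/goarista/monotime"
)

func main() {
	start := monotime.Now()
	time.Sleep(50 * time.Millisecond)
	// Since is immune to wall-clock jumps (NTP adjustments, leap seconds),
	// unlike subtracting two time.Now() values.
	fmt.Println("elapsed:", monotime.Since(start))
}
```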
12
vendor/github.com/btcsuite/btcd/btcec/README.md
generated
vendored
@ -1,9 +1,9 @@
|
||||
btcec
|
||||
=====
|
||||
|
||||
[](https://travis-ci.org/btcsuite/btcec)
|
||||
[](https://github.com/btcsuite/btcd/actions)
|
||||
[](http://copyfree.org)
|
||||
[](http://godoc.org/github.com/btcsuite/btcd/btcec)
|
||||
[](https://pkg.go.dev/github.com/btcsuite/btcd/btcec)
|
||||
|
||||
Package btcec implements elliptic curve cryptography needed for working with
|
||||
Bitcoin (secp256k1 only for now). It is designed so that it may be used with the
|
||||
@ -25,19 +25,19 @@ $ go get -u github.com/btcsuite/btcd/btcec
|
||||
|
||||
## Examples
|
||||
|
||||
* [Sign Message](http://godoc.org/github.com/btcsuite/btcd/btcec#example-package--SignMessage)
|
||||
* [Sign Message](https://pkg.go.dev/github.com/btcsuite/btcd/btcec#example-package--SignMessage)
|
||||
Demonstrates signing a message with a secp256k1 private key that is first
|
||||
parsed from raw bytes and serializing the generated signature.
|
||||
|
||||
* [Verify Signature](http://godoc.org/github.com/btcsuite/btcd/btcec#example-package--VerifySignature)
|
||||
* [Verify Signature](https://pkg.go.dev/github.com/btcsuite/btcd/btcec#example-package--VerifySignature)
|
||||
Demonstrates verifying a secp256k1 signature against a public key that is
|
||||
first parsed from raw bytes. The signature is also parsed from raw bytes.
|
||||
|
||||
* [Encryption](http://godoc.org/github.com/btcsuite/btcd/btcec#example-package--EncryptMessage)
|
||||
* [Encryption](https://pkg.go.dev/github.com/btcsuite/btcd/btcec#example-package--EncryptMessage)
|
||||
Demonstrates encrypting a message for a public key that is first parsed from
|
||||
raw bytes, then decrypting it using the corresponding private key.
|
||||
|
||||
* [Decryption](http://godoc.org/github.com/btcsuite/btcd/btcec#example-package--DecryptMessage)
|
||||
* [Decryption](https://pkg.go.dev/github.com/btcsuite/btcd/btcec#example-package--DecryptMessage)
|
||||
Demonstrates decrypting a message using a private key that is first parsed
|
||||
from raw bytes.
|
||||
|
||||
|
2
vendor/github.com/btcsuite/btcd/btcec/btcec.go
generated
vendored
@ -930,6 +930,8 @@ func initS256() {
|
||||
secp256k1.Gx = fromHex("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798")
|
||||
secp256k1.Gy = fromHex("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8")
|
||||
secp256k1.BitSize = 256
|
||||
// Curve name taken from https://safecurves.cr.yp.to/.
|
||||
secp256k1.Name = "secp256k1"
|
||||
secp256k1.q = new(big.Int).Div(new(big.Int).Add(secp256k1.P,
|
||||
big.NewInt(1)), big.NewInt(4))
|
||||
secp256k1.H = 1
|
||||
|
20
vendor/github.com/btcsuite/btcd/btcec/field.go
generated
vendored
@ -226,20 +226,24 @@ func (f *fieldVal) SetBytes(b *[32]byte) *fieldVal {
|
||||
return f
|
||||
}
|
||||
|
||||
// SetByteSlice packs the passed big-endian value into the internal field value
|
||||
// representation. Only the first 32-bytes are used. As a result, it is up to
|
||||
// the caller to ensure numbers of the appropriate size are used or the value
|
||||
// will be truncated.
|
||||
// SetByteSlice interprets the provided slice as a 256-bit big-endian unsigned
|
||||
// integer (meaning it is truncated to the first 32 bytes), packs it into the
|
||||
// internal field value representation, and returns the updated field value.
|
||||
//
|
||||
// Note that since passing a slice with more than 32 bytes is truncated, it is
|
||||
// possible that the truncated value is less than the field prime. It is up to
|
||||
// the caller to decide whether it needs to provide numbers of the appropriate
|
||||
// size or if it is acceptable to use this function with the described
|
||||
// truncation behavior.
|
||||
//
|
||||
// The field value is returned to support chaining. This enables syntax like:
|
||||
// f := new(fieldVal).SetByteSlice(byteSlice)
|
||||
func (f *fieldVal) SetByteSlice(b []byte) *fieldVal {
|
||||
var b32 [32]byte
|
||||
for i := 0; i < len(b); i++ {
|
||||
if i < 32 {
|
||||
b32[i+(32-len(b))] = b[i]
|
||||
}
|
||||
if len(b) > 32 {
|
||||
b = b[:32]
|
||||
}
|
||||
copy(b32[32-len(b):], b)
|
||||
return f.SetBytes(&b32)
|
||||
}
|
||||
|
||||
|
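The new SetByteSlice implementation above truncates the input to at most 32 bytes and right-aligns it into the fixed array. A standalone sketch of the same slice handling (fieldVal itself is unexported, so this only mirrors the copy logic):

```go
package main

import "fmt"

// Mirrors the new SetByteSlice behaviour: treat the slice as a big-endian value,
// keep only the first 32 bytes, and right-align it in a 32-byte array.
func toField32(b []byte) [32]byte {
	var b32 [32]byte
	if len(b) > 32 {
		b = b[:32]
	}
	copy(b32[32-len(b):], b)
	return b32
}

func main() {
	short := toField32([]byte{0x01, 0x02})
	fmt.Printf("%x\n", short[30:]) // 0102: right-aligned into the last two bytes
}
```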
21
vendor/github.com/btcsuite/btcd/btcec/signature.go
generated
vendored
@ -284,6 +284,25 @@ func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
|
||||
// format and thus we match bitcoind's behaviour here.
|
||||
func recoverKeyFromSignature(curve *KoblitzCurve, sig *Signature, msg []byte,
|
||||
iter int, doChecks bool) (*PublicKey, error) {
|
||||
// Parse and validate the R and S signature components.
|
||||
//
|
||||
// Fail if r and s are not in [1, N-1].
|
||||
if sig.R.Cmp(curve.Params().N) != -1 {
|
||||
return nil, errors.New("signature R is >= curve order")
|
||||
}
|
||||
|
||||
if sig.R.Sign() == 0 {
|
||||
return nil, errors.New("signature R is 0")
|
||||
}
|
||||
|
||||
if sig.S.Cmp(curve.Params().N) != -1 {
|
||||
return nil, errors.New("signature S is >= curve order")
|
||||
}
|
||||
|
||||
if sig.S.Sign() == 0 {
|
||||
return nil, errors.New("signature S is 0")
|
||||
}
|
||||
|
||||
// 1.1 x = (n * i) + r
|
||||
Rx := new(big.Int).Mul(curve.Params().N,
|
||||
new(big.Int).SetInt64(int64(iter/2)))
|
||||
@ -393,7 +412,7 @@ func SignCompact(curve *KoblitzCurve, key *PrivateKey,
|
||||
|
||||
// RecoverCompact verifies the compact signature "signature" of "hash" for the
|
||||
// Koblitz curve in "curve". If the signature matches then the recovered public
|
||||
// key will be returned as well as a boolen if the original key was compressed
|
||||
// key will be returned as well as a boolean if the original key was compressed
|
||||
// or not, else an error will be returned.
|
||||
func RecoverCompact(curve *KoblitzCurve, signature,
|
||||
hash []byte) (*PublicKey, bool, error) {
|
||||
|
4
vendor/github.com/btcsuite/btcd/chaincfg/README.md
generated
vendored
@ -1,9 +1,9 @@
|
||||
chaincfg
|
||||
========
|
||||
|
||||
[](https://travis-ci.org/btcsuite/btcd)
|
||||
[](https://github.com/btcsuite/btcd/actions)
|
||||
[](http://copyfree.org)
|
||||
[](http://godoc.org/github.com/btcsuite/btcd/chaincfg)
|
||||
[](https://pkg.go.dev/github.com/btcsuite/btcd/chaincfg)
|
||||
|
||||
Package chaincfg defines chain configuration parameters for the three standard
|
||||
Bitcoin networks and provides the ability for callers to define their own custom
|
||||
|
4
vendor/github.com/btcsuite/btcd/chaincfg/chainhash/README.md
generated
vendored
@ -1,9 +1,9 @@
|
||||
chainhash
|
||||
=========
|
||||
|
||||
[](https://travis-ci.org/btcsuite/btcd)
|
||||
[](https://github.com/btcsuite/btcd/actions)
|
||||
[](http://copyfree.org)
|
||||
[](http://godoc.org/github.com/btcsuite/btcd/chaincfg/chainhash)
|
||||
[](https://pkg.go.dev/github.com/btcsuite/btcd/chaincfg/chainhash)
|
||||
=======
|
||||
|
||||
chainhash provides a generic hash type and associated functions that allows the
|
||||
|
28
vendor/github.com/btcsuite/btcd/chaincfg/genesis.go
generated
vendored
@ -170,3 +170,31 @@ var simNetGenesisBlock = wire.MsgBlock{
|
||||
},
|
||||
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
|
||||
}
|
||||
|
||||
// sigNetGenesisHash is the hash of the first block in the block chain for the
|
||||
// signet test network.
|
||||
var sigNetGenesisHash = chainhash.Hash{
|
||||
0xf6, 0x1e, 0xee, 0x3b, 0x63, 0xa3, 0x80, 0xa4,
|
||||
0x77, 0xa0, 0x63, 0xaf, 0x32, 0xb2, 0xbb, 0xc9,
|
||||
0x7c, 0x9f, 0xf9, 0xf0, 0x1f, 0x2c, 0x42, 0x25,
|
||||
0xe9, 0x73, 0x98, 0x81, 0x08, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// sigNetGenesisMerkleRoot is the hash of the first transaction in the genesis
|
||||
// block for the signet test network. It is the same as the merkle root for
|
||||
// the main network.
|
||||
var sigNetGenesisMerkleRoot = genesisMerkleRoot
|
||||
|
||||
// sigNetGenesisBlock defines the genesis block of the block chain which serves
|
||||
// as the public transaction ledger for the signet test network.
|
||||
var sigNetGenesisBlock = wire.MsgBlock{
|
||||
Header: wire.BlockHeader{
|
||||
Version: 1,
|
||||
PrevBlock: chainhash.Hash{}, // 0000000000000000000000000000000000000000000000000000000000000000
|
||||
MerkleRoot: sigNetGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
|
||||
Timestamp: time.Unix(1598918400, 0), // 2020-09-01 00:00:00 +0000 UTC
|
||||
Bits: 0x1e0377ae, // 503543726 [00000377ae000000000000000000000000000000000000000000000000000000]
|
||||
Nonce: 52613770,
|
||||
},
|
||||
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
|
||||
}
|
||||
|
165
vendor/github.com/btcsuite/btcd/chaincfg/params.go
generated
vendored
@ -5,6 +5,8 @@
|
||||
package chaincfg
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"math"
|
||||
"math/big"
|
||||
@ -38,6 +40,30 @@ var (
|
||||
// simNetPowLimit is the highest proof of work value a Bitcoin block
|
||||
// can have for the simulation test network. It is the value 2^255 - 1.
|
||||
simNetPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)
|
||||
|
||||
// sigNetPowLimit is the highest proof of work value a bitcoin block can
|
||||
// have for the signet test network. It is the value 0x0377ae << 216.
|
||||
sigNetPowLimit = new(big.Int).Lsh(new(big.Int).SetInt64(0x0377ae), 216)
|
||||
|
||||
// DefaultSignetChallenge is the byte representation of the signet
|
||||
// challenge for the default (public, Taproot enabled) signet network.
|
||||
// This is the binary equivalent of the bitcoin script
|
||||
// 1 03ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d1e086be430
|
||||
// 0359ef5021964fe22d6f8e05b2463c9540ce96883fe3b278760f048f5189f2e6c4 2
|
||||
// OP_CHECKMULTISIG
|
||||
DefaultSignetChallenge, _ = hex.DecodeString(
|
||||
"512103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d" +
|
||||
"1e086be430210359ef5021964fe22d6f8e05b2463c9540ce9688" +
|
||||
"3fe3b278760f048f5189f2e6c452ae",
|
||||
)
|
||||
|
||||
// DefaultSignetDNSSeeds is the list of seed nodes for the default
|
||||
// (public, Taproot enabled) signet network.
|
||||
DefaultSignetDNSSeeds = []DNSSeed{
|
||||
{"178.128.221.177", false},
|
||||
{"2a01:7c8:d005:390::5", false},
|
||||
{"v7ajjeirttkbnt32wpy3c6w3emwnfr3fkla7hpxcfokr3ysd3kqtzmqd.onion:38333", false},
|
||||
}
|
||||
)
|
||||
|
||||
// Checkpoint identifies a known good point in the block chain. Using
|
||||
@ -96,6 +122,11 @@ const (
|
||||
// includes the deployment of BIPS 141, 142, 144, 145, 147 and 173.
|
||||
DeploymentSegwit
|
||||
|
||||
// DeploymentTaproot defines the rule change deployment ID for the
|
||||
// Taproot (+Schnorr) soft-fork package. The taproot package includes
|
||||
// the deployment of BIPS 340, 341 and 342.
|
||||
DeploymentTaproot
|
||||
|
||||
// NOTE: DefinedDeployments must always come last since it is used to
|
||||
// determine how many defined deployments there currently are.
|
||||
|
||||
@ -578,6 +609,107 @@ var SimNetParams = Params{
|
||||
HDCoinType: 115, // ASCII for s
|
||||
}
|
||||
|
||||
// SigNetParams defines the network parameters for the default public signet
|
||||
// Bitcoin network. Not to be confused with the regression test network, this
|
||||
// network is sometimes simply called "signet" or "taproot signet".
|
||||
var SigNetParams = CustomSignetParams(
|
||||
DefaultSignetChallenge, DefaultSignetDNSSeeds,
|
||||
)
|
||||
|
||||
// CustomSignetParams creates network parameters for a custom signet network
|
||||
// from a challenge. The challenge is the binary compiled version of the block
|
||||
// challenge script.
|
||||
func CustomSignetParams(challenge []byte, dnsSeeds []DNSSeed) Params {
|
||||
// The message start is defined as the first four bytes of the sha256d
|
||||
// of the challenge script, as a single push (i.e. prefixed with the
|
||||
// challenge script length).
|
||||
challengeLength := byte(len(challenge))
|
||||
hashDouble := chainhash.DoubleHashB(
|
||||
append([]byte{challengeLength}, challenge...),
|
||||
)
|
||||
|
||||
// We use little endian encoding of the hash prefix to be in line with
|
||||
// the other wire network identities.
|
||||
net := binary.LittleEndian.Uint32(hashDouble[0:4])
|
||||
return Params{
|
||||
Name: "signet",
|
||||
Net: wire.BitcoinNet(net),
|
||||
DefaultPort: "38333",
|
||||
DNSSeeds: dnsSeeds,
|
||||
|
||||
// Chain parameters
|
||||
GenesisBlock: &sigNetGenesisBlock,
|
||||
GenesisHash: &sigNetGenesisHash,
|
||||
PowLimit: sigNetPowLimit,
|
||||
PowLimitBits: 0x1e0377ae,
|
||||
BIP0034Height: 1,
|
||||
BIP0065Height: 1,
|
||||
BIP0066Height: 1,
|
||||
CoinbaseMaturity: 100,
|
||||
SubsidyReductionInterval: 210000,
|
||||
TargetTimespan: time.Hour * 24 * 14, // 14 days
|
||||
TargetTimePerBlock: time.Minute * 10, // 10 minutes
|
||||
RetargetAdjustmentFactor: 4, // 25% less, 400% more
|
||||
ReduceMinDifficulty: false,
|
||||
MinDiffReductionTime: time.Minute * 20, // TargetTimePerBlock * 2
|
||||
GenerateSupported: false,
|
||||
|
||||
// Checkpoints ordered from oldest to newest.
|
||||
Checkpoints: nil,
|
||||
|
||||
// Consensus rule change deployments.
|
||||
//
|
||||
// The miner confirmation window is defined as:
|
||||
// target proof of work timespan / target proof of work spacing
|
||||
RuleChangeActivationThreshold: 1916, // 95% of 2016
|
||||
MinerConfirmationWindow: 2016,
|
||||
Deployments: [DefinedDeployments]ConsensusDeployment{
|
||||
DeploymentTestDummy: {
|
||||
BitNumber: 28,
|
||||
StartTime: 1199145601, // January 1, 2008 UTC
|
||||
ExpireTime: 1230767999, // December 31, 2008 UTC
|
||||
},
|
||||
DeploymentCSV: {
|
||||
BitNumber: 29,
|
||||
StartTime: 0, // Always available for vote
|
||||
ExpireTime: math.MaxInt64, // Never expires
|
||||
},
|
||||
DeploymentSegwit: {
|
||||
BitNumber: 29,
|
||||
StartTime: 0, // Always available for vote
|
||||
ExpireTime: math.MaxInt64, // Never expires.
|
||||
},
|
||||
DeploymentTaproot: {
|
||||
BitNumber: 29,
|
||||
StartTime: 0, // Always available for vote
|
||||
ExpireTime: math.MaxInt64, // Never expires.
|
||||
},
|
||||
},
|
||||
|
||||
// Mempool parameters
|
||||
RelayNonStdTxs: false,
|
||||
|
||||
// Human-readable part for Bech32 encoded segwit addresses, as defined in
|
||||
// BIP 173.
|
||||
Bech32HRPSegwit: "tb", // always tb for test net
|
||||
|
||||
// Address encoding magics
|
||||
PubKeyHashAddrID: 0x6f, // starts with m or n
|
||||
ScriptHashAddrID: 0xc4, // starts with 2
|
||||
WitnessPubKeyHashAddrID: 0x03, // starts with QW
|
||||
WitnessScriptHashAddrID: 0x28, // starts with T7n
|
||||
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
|
||||
|
||||
// BIP32 hierarchical deterministic extended key magics
|
||||
HDPrivateKeyID: [4]byte{0x04, 0x35, 0x83, 0x94}, // starts with tprv
|
||||
HDPublicKeyID: [4]byte{0x04, 0x35, 0x87, 0xcf}, // starts with tpub
|
||||
|
||||
// BIP44 coin type used in the hierarchical deterministic path for
|
||||
// address generation.
|
||||
HDCoinType: 1,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrDuplicateNet describes an error where the parameters for a Bitcoin
|
||||
// network could not be set due to the network already being a standard
|
||||
@ -588,6 +720,10 @@ var (
|
||||
// is intended to identify the network for a hierarchical deterministic
|
||||
// private extended key is not registered.
|
||||
ErrUnknownHDKeyID = errors.New("unknown hd private extended key bytes")
|
||||
|
||||
// ErrInvalidHDKeyID describes an error where the provided hierarchical
|
||||
// deterministic version bytes, or hd key id, is malformed.
|
||||
ErrInvalidHDKeyID = errors.New("invalid hd extended key version bytes")
|
||||
)
|
||||
|
||||
var (
|
||||
@ -619,7 +755,11 @@ func Register(params *Params) error {
|
||||
registeredNets[params.Net] = struct{}{}
|
||||
pubKeyHashAddrIDs[params.PubKeyHashAddrID] = struct{}{}
|
||||
scriptHashAddrIDs[params.ScriptHashAddrID] = struct{}{}
|
||||
hdPrivToPubKeyIDs[params.HDPrivateKeyID] = params.HDPublicKeyID[:]
|
||||
|
||||
err := RegisterHDKeyID(params.HDPublicKeyID[:], params.HDPrivateKeyID[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// A valid Bech32 encoded segwit address always has as prefix the
|
||||
// human-readable part for the given net followed by '1'.
|
||||
@ -666,6 +806,29 @@ func IsBech32SegwitPrefix(prefix string) bool {
|
||||
return ok
|
||||
}
|
||||
|
||||
// RegisterHDKeyID registers a public and private hierarchical deterministic
|
||||
// extended key ID pair.
|
||||
//
|
||||
// Non-standard HD version bytes, such as the ones documented in SLIP-0132,
|
||||
// should be registered using this method for library packages to lookup key
|
||||
// IDs (aka HD version bytes). When the provided key IDs are invalid, the
|
||||
// ErrInvalidHDKeyID error will be returned.
|
||||
//
|
||||
// Reference:
|
||||
// SLIP-0132 : Registered HD version bytes for BIP-0032
|
||||
// https://github.com/satoshilabs/slips/blob/master/slip-0132.md
|
||||
func RegisterHDKeyID(hdPublicKeyID []byte, hdPrivateKeyID []byte) error {
|
||||
if len(hdPublicKeyID) != 4 || len(hdPrivateKeyID) != 4 {
|
||||
return ErrInvalidHDKeyID
|
||||
}
|
||||
|
||||
var keyID [4]byte
|
||||
copy(keyID[:], hdPrivateKeyID)
|
||||
hdPrivToPubKeyIDs[keyID] = hdPublicKeyID
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HDPrivateKeyToPublicKeyID accepts a private hierarchical deterministic
|
||||
// extended key id and returns the associated public key id. When the provided
|
||||
// id is not registered, the ErrUnknownHDKeyID error will be returned.
|
||||
|
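To make the message-start derivation in CustomSignetParams concrete, the sketch below recomputes the wire network magic from the default challenge: chainhash.DoubleHashB is sha256 applied twice, taken over the length-prefixed challenge, with the first four bytes read little-endian. The printed value is whatever that computation yields; no specific magic is assumed here.

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	// DefaultSignetChallenge from params.go above.
	challenge, _ := hex.DecodeString(
		"512103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d" +
			"1e086be430210359ef5021964fe22d6f8e05b2463c9540ce9688" +
			"3fe3b278760f048f5189f2e6c452ae")

	// sha256d over the single-push form: length byte followed by the script.
	prefixed := append([]byte{byte(len(challenge))}, challenge...)
	first := sha256.Sum256(prefixed)
	double := sha256.Sum256(first[:])

	net := binary.LittleEndian.Uint32(double[0:4])
	fmt.Printf("wire.BitcoinNet = 0x%08x\n", net)
}
```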
4
vendor/github.com/btcsuite/btcd/wire/README.md
generated
vendored
@ -1,9 +1,9 @@
|
||||
wire
|
||||
====
|
||||
|
||||
[](https://travis-ci.org/btcsuite/btcd)
|
||||
[](https://github.com/btcsuite/btcd/actions)
|
||||
[](http://copyfree.org)
|
||||
[](http://godoc.org/github.com/btcsuite/btcd/wire)
|
||||
[](https://pkg.go.dev/github.com/btcsuite/btcd/wire)
|
||||
=======
|
||||
|
||||
Package wire implements the bitcoin wire protocol. A comprehensive suite of
|
||||
|
8
vendor/github.com/btcsuite/btcd/wire/message.go
generated
vendored
@ -57,6 +57,7 @@ const (
CmdCFilter = "cfilter"
CmdCFHeaders = "cfheaders"
CmdCFCheckpt = "cfcheckpt"
CmdSendAddrV2 = "sendaddrv2"
)

// MessageEncoding represents the wire message encoding format to be used.
@ -99,6 +100,9 @@ func makeEmptyMessage(command string) (Message, error) {
case CmdVerAck:
msg = &MsgVerAck{}

case CmdSendAddrV2:
msg = &MsgSendAddrV2{}

case CmdGetAddr:
msg = &MsgGetAddr{}

@ -213,7 +217,7 @@ func readMessageHeader(r io.Reader) (int, *messageHeader, error) {
readElements(hr, &hdr.magic, &command, &hdr.length, &hdr.checksum)

// Strip trailing zeros from command string.
hdr.command = string(bytes.TrimRight(command[:], string(0)))
hdr.command = string(bytes.TrimRight(command[:], "\x00"))

return n, &hdr, nil
}
@ -401,7 +405,7 @@ func ReadMessageWithEncodingN(r io.Reader, pver uint32, btcnet BitcoinNet,

// Test checksum.
checksum := chainhash.DoubleHashB(payload)[0:4]
if !bytes.Equal(checksum[:], hdr.checksum[:]) {
if !bytes.Equal(checksum, hdr.checksum[:]) {
str := fmt.Sprintf("payload checksum failed - header "+
"indicates %v, but actual checksum is %v.",
hdr.checksum, checksum)
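Note: a minimal sketch (illustrative, not from the patch) of the checksum that the hunk above compares against the header: the first four bytes of a double-SHA256 over the payload.

package main

import (
	"bytes"
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

func main() {
	payload := []byte("example payload")

	// The wire header stores the first 4 bytes of double-SHA256(payload);
	// the reader recomputes the same value and compares.
	checksum := chainhash.DoubleHashB(payload)[0:4]

	var hdrChecksum [4]byte
	copy(hdrChecksum[:], checksum)

	fmt.Println(bytes.Equal(checksum, hdrChecksum[:])) // true
}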
42  vendor/github.com/btcsuite/btcd/wire/msgsendaddrv2.go  generated  vendored  Normal file
@ -0,0 +1,42 @@
package wire

import (
"io"
)

// MsgSendAddrV2 defines a bitcoin sendaddrv2 message which is used for a peer
// to signal support for receiving ADDRV2 messages (BIP155). It implements the
// Message interface.
//
// This message has no payload.
type MsgSendAddrV2 struct{}

// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgSendAddrV2) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
return nil
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgSendAddrV2) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
return nil
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgSendAddrV2) Command() string {
return CmdSendAddrV2
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgSendAddrV2) MaxPayloadLength(pver uint32) uint32 {
return 0
}

// NewMsgSendAddrV2 returns a new bitcoin sendaddrv2 message that conforms to the
// Message interface.
func NewMsgSendAddrV2() *MsgSendAddrV2 {
return &MsgSendAddrV2{}
}
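Note: a brief sketch of how the new payload-less message behaves through the Message interface (assumed usage, not part of the patch):

package main

import (
	"bytes"
	"fmt"

	"github.com/btcsuite/btcd/wire"
)

func main() {
	msg := wire.NewMsgSendAddrV2()

	// sendaddrv2 carries no payload, so encoding produces zero bytes.
	var buf bytes.Buffer
	if err := msg.BtcEncode(&buf, wire.ProtocolVersion, wire.LatestEncoding); err != nil {
		panic(err)
	}

	fmt.Println(msg.Command(), buf.Len(), msg.MaxPayloadLength(wire.ProtocolVersion))
	// sendaddrv2 0 0
}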
70  vendor/github.com/btcsuite/btcd/wire/msgtx.go  generated  vendored
@ -109,14 +109,38 @@ const (
maxWitnessItemSize = 11000
)

// witnessMarkerBytes are a pair of bytes specific to the witness encoding. If
// this sequence is encoutered, then it indicates a transaction has iwtness
// data. The first byte is an always 0x00 marker byte, which allows decoders to
// distinguish a serialized transaction with witnesses from a regular (legacy)
// one. The second byte is the Flag field, which at the moment is always 0x01,
// but may be extended in the future to accommodate auxiliary non-committed
// fields.
var witessMarkerBytes = []byte{0x00, 0x01}
// TxFlagMarker is the first byte of the FLAG field in a bitcoin tx
// message. It allows decoders to distinguish a regular serialized
// transaction from one that would require a different parsing logic.
//
// Position of FLAG in a bitcoin tx message:
// ┌─────────┬────────────────────┬─────────────┬─────┐
// │ VERSION │ FLAG │ TX-IN-COUNT │ ... │
// │ 4 bytes │ 2 bytes (optional) │ varint │ │
// └─────────┴────────────────────┴─────────────┴─────┘
//
// Zooming into the FLAG field:
// ┌── FLAG ─────────────┬────────┐
// │ TxFlagMarker (0x00) │ TxFlag │
// │ 1 byte │ 1 byte │
// └─────────────────────┴────────┘
const TxFlagMarker = 0x00

// TxFlag is the second byte of the FLAG field in a bitcoin tx message.
// It indicates the decoding logic to use in the transaction parser, if
// TxFlagMarker is detected in the tx message.
//
// As of writing this, only the witness flag (0x01) is supported, but may be
// extended in the future to accommodate auxiliary non-committed fields.
type TxFlag = byte

const (
// WitnessFlag is a flag specific to witness encoding. If the TxFlagMarker
// is encountered followed by the WitnessFlag, then it indicates a
// transaction has witness data. This allows decoders to distinguish a
// serialized transaction with witnesses from a legacy one.
WitnessFlag TxFlag = 0x01
)

// scriptFreeList defines a free list of byte slices (up to the maximum number
// defined by the freeListMaxItems constant) that have a cap according to the
@ -420,18 +444,19 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error
return err
}

// A count of zero (meaning no TxIn's to the uninitiated) indicates
// this is a transaction with witness data.
var flag [1]byte
if count == 0 && enc == WitnessEncoding {
// Next, we need to read the flag, which is a single byte.
// A count of zero (meaning no TxIn's to the uninitiated) means that the
// value is a TxFlagMarker, and hence indicates the presence of a flag.
var flag [1]TxFlag
if count == TxFlagMarker && enc == WitnessEncoding {
// The count varint was in fact the flag marker byte. Next, we need to
// read the flag value, which is a single byte.
if _, err = io.ReadFull(r, flag[:]); err != nil {
return err
}

// At the moment, the flag MUST be 0x01. In the future other
// flag types may be supported.
if flag[0] != 0x01 {
// At the moment, the flag MUST be WitnessFlag (0x01). In the future
// other flag types may be supported.
if flag[0] != WitnessFlag {
str := fmt.Sprintf("witness tx but flag byte is %x", flag)
return messageError("MsgTx.BtcDecode", str)
}
@ -690,14 +715,11 @@ func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error
// defined in BIP0144.
doWitness := enc == WitnessEncoding && msg.HasWitness()
if doWitness {
// After the txn's Version field, we include two additional
// bytes specific to the witness encoding. The first byte is an
// always 0x00 marker byte, which allows decoders to
// distinguish a serialized transaction with witnesses from a
// regular (legacy) one. The second byte is the Flag field,
// which at the moment is always 0x01, but may be extended in
// the future to accommodate auxiliary non-committed fields.
if _, err := w.Write(witessMarkerBytes); err != nil {
// After the transaction's Version field, we include two additional
// bytes specific to the witness encoding. This byte sequence is known
// as a flag. The first byte is a marker byte (TxFlagMarker) and the
// second one is the flag value to indicate presence of witness data.
if _, err := w.Write([]byte{TxFlagMarker, WitnessFlag}); err != nil {
return err
}
}
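Note: to make the marker/flag layout concrete, here is a tiny self-contained sketch (illustrative only) of how a serialized segwit transaction announces witness data right after the 4-byte version field; the constants mirror wire.TxFlagMarker and wire.WitnessFlag.

package main

import "fmt"

const (
	txFlagMarker = 0x00 // wire.TxFlagMarker
	witnessFlag  = 0x01 // wire.WitnessFlag
)

// hasWitnessFlag reports whether the optional two-byte flag sequence follows
// the version field of a serialized transaction.
func hasWitnessFlag(serialized []byte) bool {
	if len(serialized) < 6 {
		return false
	}
	return serialized[4] == txFlagMarker && serialized[5] == witnessFlag
}

func main() {
	legacy := []byte{0x01, 0x00, 0x00, 0x00, 0x02} // version, then tx-in count
	segwit := []byte{0x01, 0x00, 0x00, 0x00, txFlagMarker, witnessFlag}

	fmt.Println(hasWitnessFlag(legacy), hasWitnessFlag(segwit)) // false true
}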
15  vendor/github.com/btcsuite/btcutil/.travis.yml  generated  vendored
@ -1,15 +0,0 @@
language: go
go:
- "1.9.4"
- "1.10"
sudo: false
install:
- go get -d -t -v ./...
- go get -v github.com/alecthomas/gometalinter
- gometalinter --install
script:
- export PATH=$PATH:$HOME/gopath/bin
- ./goclean.sh
after_success:
- go get -v github.com/mattn/goveralls
- goveralls -coverprofile=profile.cov -service=travis-ci
7  vendor/github.com/btcsuite/btcutil/README.md  generated  vendored
@ -1,10 +1,9 @@
btcutil
=======

[Build Status](https://travis-ci.org/btcsuite/btcutil)
[Coverage Status](https://coveralls.io/r/btcsuite/btcutil?branch=master)
[ISC License](http://copyfree.org)
[GoDoc](http://godoc.org/github.com/btcsuite/btcutil)
[Build Status](https://github.com/btcsuite/btcutil/actions)
[ISC License](http://copyfree.org)
[GoDoc](https://godoc.org/github.com/btcsuite/btcutil)

Package btcutil provides bitcoin-specific convenience functions and types.
A comprehensive suite of tests is provided to ensure proper functionality. See
4  vendor/github.com/btcsuite/btcutil/address.go  generated  vendored
@ -23,7 +23,7 @@ import (
type UnsupportedWitnessVerError byte

func (e UnsupportedWitnessVerError) Error() string {
return "unsupported witness version: " + string(e)
return fmt.Sprintf("unsupported witness version: %#x", e)
}

// UnsupportedWitnessProgLenError describes an error where a segwit address
@ -31,7 +31,7 @@ func (e UnsupportedWitnessVerError) Error() string {
type UnsupportedWitnessProgLenError int

func (e UnsupportedWitnessProgLenError) Error() string {
return "unsupported witness program length: " + string(e)
return fmt.Sprintf("unsupported witness program length: %d", e)
}

var (
95  vendor/github.com/btcsuite/btcutil/base58/base58.go  generated  vendored
@ -10,24 +10,63 @@ import (

//go:generate go run genalphabet.go

var bigRadix = big.NewInt(58)
var bigZero = big.NewInt(0)
var bigRadix = [...]*big.Int{
big.NewInt(0),
big.NewInt(58),
big.NewInt(58 * 58),
big.NewInt(58 * 58 * 58),
big.NewInt(58 * 58 * 58 * 58),
big.NewInt(58 * 58 * 58 * 58 * 58),
big.NewInt(58 * 58 * 58 * 58 * 58 * 58),
big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58),
big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58 * 58),
big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58 * 58 * 58),
bigRadix10,
}

var bigRadix10 = big.NewInt(58 * 58 * 58 * 58 * 58 * 58 * 58 * 58 * 58 * 58) // 58^10

// Decode decodes a modified base58 string to a byte slice.
func Decode(b string) []byte {
answer := big.NewInt(0)
j := big.NewInt(1)

scratch := new(big.Int)
for i := len(b) - 1; i >= 0; i-- {
tmp := b58[b[i]]
if tmp == 255 {
return []byte("")

// Calculating with big.Int is slow for each iteration.
// x += b58[b[i]] * j
// j *= 58
//
// Instead we can try to do as much calculations on int64.
// We can represent a 10 digit base58 number using an int64.
//
// Hence we'll try to convert 10, base58 digits at a time.
// The rough idea is to calculate `t`, such that:
//
// t := b58[b[i+9]] * 58^9 ... + b58[b[i+1]] * 58^1 + b58[b[i]] * 58^0
// x *= 58^10
// x += t
//
// Of course, in addition, we'll need to handle boundary condition when `b` is not multiple of 58^10.
// In that case we'll use the bigRadix[n] lookup for the appropriate power.
for t := b; len(t) > 0; {
n := len(t)
if n > 10 {
n = 10
}
scratch.SetInt64(int64(tmp))
scratch.Mul(j, scratch)

total := uint64(0)
for _, v := range t[:n] {
tmp := b58[v]
if tmp == 255 {
return []byte("")
}
total = total*58 + uint64(tmp)
}

answer.Mul(answer, bigRadix[n])
scratch.SetUint64(total)
answer.Add(answer, scratch)
j.Mul(j, bigRadix)

t = t[n:]
}

tmpval := answer.Bytes()
@ -50,11 +89,35 @@ func Encode(b []byte) string {
x := new(big.Int)
x.SetBytes(b)

answer := make([]byte, 0, len(b)*136/100)
for x.Cmp(bigZero) > 0 {
mod := new(big.Int)
x.DivMod(x, bigRadix, mod)
answer = append(answer, alphabet[mod.Int64()])
// maximum length of output is log58(2^(8*len(b))) == len(b) * 8 / log(58)
maxlen := int(float64(len(b))*1.365658237309761) + 1
answer := make([]byte, 0, maxlen)
mod := new(big.Int)
for x.Sign() > 0 {
// Calculating with big.Int is slow for each iteration.
// x, mod = x / 58, x % 58
//
// Instead we can try to do as much calculations on int64.
// x, mod = x / 58^10, x % 58^10
//
// Which will give us mod, which is 10 digit base58 number.
// We'll loop that 10 times to convert to the answer.

x.DivMod(x, bigRadix10, mod)
if x.Sign() == 0 {
// When x = 0, we need to ensure we don't add any extra zeros.
m := mod.Int64()
for m > 0 {
answer = append(answer, alphabet[m%58])
m /= 58
}
} else {
m := mod.Int64()
for i := 0; i < 10; i++ {
answer = append(answer, alphabet[m%58])
m /= 58
}
}
}

// leading zero bytes
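Note: the chunked 58^10 arithmetic above is internal and does not change the package's behaviour; a short round-trip through the exported API (a sketch, with the import path taken from the vendor directory) looks like this:

package main

import (
	"bytes"
	"fmt"

	"github.com/btcsuite/btcutil/base58"
)

func main() {
	raw := []byte{0x00, 0x01, 0x02, 0x03, 0xff}

	// Encode to the modified base58 alphabet and decode back.
	encoded := base58.Encode(raw)
	decoded := base58.Decode(encoded)

	fmt.Println(encoded, bytes.Equal(raw, decoded)) // round-trip should hold
}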
386  vendor/github.com/btcsuite/btcutil/bech32/bech32.go  generated  vendored
@ -1,54 +1,197 @@
|
||||
// Copyright (c) 2017 The btcsuite developers
|
||||
// Copyright (c) 2019 The Decred developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bech32
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// charset is the set of characters used in the data section of bech32 strings.
|
||||
// Note that this is ordered, such that for a given charset[i], i is the binary
|
||||
// value of the character.
|
||||
const charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
|
||||
|
||||
// gen encodes the generator polynomial for the bech32 BCH checksum.
|
||||
var gen = []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
|
||||
|
||||
// Decode decodes a bech32 encoded string, returning the human-readable
|
||||
// part and the data part excluding the checksum.
|
||||
func Decode(bech string) (string, []byte, error) {
|
||||
// The maximum allowed length for a bech32 string is 90. It must also
|
||||
// be at least 8 characters, since it needs a non-empty HRP, a
|
||||
// separator, and a 6 character checksum.
|
||||
if len(bech) < 8 || len(bech) > 90 {
|
||||
return "", nil, fmt.Errorf("invalid bech32 string length %d",
|
||||
len(bech))
|
||||
// toBytes converts each character in the string 'chars' to the value of the
|
||||
// index of the correspoding character in 'charset'.
|
||||
func toBytes(chars string) ([]byte, error) {
|
||||
decoded := make([]byte, 0, len(chars))
|
||||
for i := 0; i < len(chars); i++ {
|
||||
index := strings.IndexByte(charset, chars[i])
|
||||
if index < 0 {
|
||||
return nil, ErrNonCharsetChar(chars[i])
|
||||
}
|
||||
decoded = append(decoded, byte(index))
|
||||
}
|
||||
// Only ASCII characters between 33 and 126 are allowed.
|
||||
for i := 0; i < len(bech); i++ {
|
||||
if bech[i] < 33 || bech[i] > 126 {
|
||||
return "", nil, fmt.Errorf("invalid character in "+
|
||||
"string: '%c'", bech[i])
|
||||
return decoded, nil
|
||||
}
|
||||
|
||||
// bech32Polymod calculates the BCH checksum for a given hrp, values and
|
||||
// checksum data. Checksum is optional, and if nil a 0 checksum is assumed.
|
||||
//
|
||||
// Values and checksum (if provided) MUST be encoded as 5 bits per element (base
|
||||
// 32), otherwise the results are undefined.
|
||||
//
|
||||
// For more details on the polymod calculation, please refer to BIP 173.
|
||||
func bech32Polymod(hrp string, values, checksum []byte) int {
|
||||
chk := 1
|
||||
|
||||
// Account for the high bits of the HRP in the checksum.
|
||||
for i := 0; i < len(hrp); i++ {
|
||||
b := chk >> 25
|
||||
hiBits := int(hrp[i]) >> 5
|
||||
chk = (chk&0x1ffffff)<<5 ^ hiBits
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The characters must be either all lowercase or all uppercase.
|
||||
lower := strings.ToLower(bech)
|
||||
upper := strings.ToUpper(bech)
|
||||
if bech != lower && bech != upper {
|
||||
return "", nil, fmt.Errorf("string not all lowercase or all " +
|
||||
"uppercase")
|
||||
// Account for the separator (0) between high and low bits of the HRP.
|
||||
// x^0 == x, so we eliminate the redundant xor used in the other rounds.
|
||||
b := chk >> 25
|
||||
chk = (chk & 0x1ffffff) << 5
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
|
||||
// We'll work with the lowercase string from now on.
|
||||
bech = lower
|
||||
// Account for the low bits of the HRP.
|
||||
for i := 0; i < len(hrp); i++ {
|
||||
b := chk >> 25
|
||||
loBits := int(hrp[i]) & 31
|
||||
chk = (chk&0x1ffffff)<<5 ^ loBits
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Account for the values.
|
||||
for _, v := range values {
|
||||
b := chk >> 25
|
||||
chk = (chk&0x1ffffff)<<5 ^ int(v)
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if checksum == nil {
|
||||
// A nil checksum is used during encoding, so assume all bytes are zero.
|
||||
// x^0 == x, so we eliminate the redundant xor used in the other rounds.
|
||||
for v := 0; v < 6; v++ {
|
||||
b := chk >> 25
|
||||
chk = (chk & 0x1ffffff) << 5
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Checksum is provided during decoding, so use it.
|
||||
for _, v := range checksum {
|
||||
b := chk >> 25
|
||||
chk = (chk&0x1ffffff)<<5 ^ int(v)
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return chk
|
||||
}
|
||||
|
||||
// writeBech32Checksum calculates the checksum data expected for a string that
|
||||
// will have the given hrp and payload data and writes it to the provided string
|
||||
// builder.
|
||||
//
|
||||
// The payload data MUST be encoded as a base 32 (5 bits per element) byte slice
|
||||
// and the hrp MUST only use the allowed character set (ascii chars between 33
|
||||
// and 126), otherwise the results are undefined.
|
||||
//
|
||||
// For more details on the checksum calculation, please refer to BIP 173.
|
||||
func writeBech32Checksum(hrp string, data []byte, bldr *strings.Builder) {
|
||||
polymod := bech32Polymod(hrp, data, nil) ^ 1
|
||||
for i := 0; i < 6; i++ {
|
||||
b := byte((polymod >> uint(5*(5-i))) & 31)
|
||||
|
||||
// This can't fail, given we explicitly cap the previous b byte by the
|
||||
// first 31 bits.
|
||||
c := charset[b]
|
||||
bldr.WriteByte(c)
|
||||
}
|
||||
}
|
||||
|
||||
// bech32VerifyChecksum verifies whether the bech32 string specified by the
|
||||
// provided hrp and payload data (encoded as 5 bits per element byte slice) has
|
||||
// the correct checksum suffix.
|
||||
//
|
||||
// Data MUST have more than 6 elements, otherwise this function panics.
|
||||
//
|
||||
// For more details on the checksum verification, please refer to BIP 173.
|
||||
func bech32VerifyChecksum(hrp string, data []byte) bool {
|
||||
checksum := data[len(data)-6:]
|
||||
values := data[:len(data)-6]
|
||||
polymod := bech32Polymod(hrp, values, checksum)
|
||||
return polymod == 1
|
||||
}
|
||||
|
||||
// DecodeNoLimit decodes a bech32 encoded string, returning the human-readable
|
||||
// part and the data part excluding the checksum. This function does NOT
|
||||
// validate against the BIP-173 maximum length allowed for bech32 strings and
|
||||
// is meant for use in custom applications (such as lightning network payment
|
||||
// requests), NOT on-chain addresses.
|
||||
//
|
||||
// Note that the returned data is 5-bit (base32) encoded and the human-readable
|
||||
// part will be lowercase.
|
||||
func DecodeNoLimit(bech string) (string, []byte, error) {
|
||||
// The minimum allowed size of a bech32 string is 8 characters, since it
|
||||
// needs a non-empty HRP, a separator, and a 6 character checksum.
|
||||
if len(bech) < 8 {
|
||||
return "", nil, ErrInvalidLength(len(bech))
|
||||
}
|
||||
|
||||
// Only ASCII characters between 33 and 126 are allowed.
|
||||
var hasLower, hasUpper bool
|
||||
for i := 0; i < len(bech); i++ {
|
||||
if bech[i] < 33 || bech[i] > 126 {
|
||||
return "", nil, ErrInvalidCharacter(bech[i])
|
||||
}
|
||||
|
||||
// The characters must be either all lowercase or all uppercase. Testing
|
||||
// directly with ascii codes is safe here, given the previous test.
|
||||
hasLower = hasLower || (bech[i] >= 97 && bech[i] <= 122)
|
||||
hasUpper = hasUpper || (bech[i] >= 65 && bech[i] <= 90)
|
||||
if hasLower && hasUpper {
|
||||
return "", nil, ErrMixedCase{}
|
||||
}
|
||||
}
|
||||
|
||||
// Bech32 standard uses only the lowercase for of strings for checksum
|
||||
// calculation.
|
||||
if hasUpper {
|
||||
bech = strings.ToLower(bech)
|
||||
}
|
||||
|
||||
// The string is invalid if the last '1' is non-existent, it is the
|
||||
// first character of the string (no human-readable part) or one of the
|
||||
// last 6 characters of the string (since checksum cannot contain '1'),
|
||||
// or if the string is more than 90 characters in total.
|
||||
// last 6 characters of the string (since checksum cannot contain '1').
|
||||
one := strings.LastIndexByte(bech, '1')
|
||||
if one < 1 || one+7 > len(bech) {
|
||||
return "", nil, fmt.Errorf("invalid index of 1")
|
||||
return "", nil, ErrInvalidSeparatorIndex(one)
|
||||
}
|
||||
|
||||
// The human-readable part is everything before the last '1'.
|
||||
@ -59,85 +202,96 @@ func Decode(bech string) (string, []byte, error) {
|
||||
// 'charset'.
|
||||
decoded, err := toBytes(data)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("failed converting data to bytes: "+
|
||||
"%v", err)
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
// Verify if the checksum (stored inside decoded[:]) is valid, given the
|
||||
// previously decoded hrp.
|
||||
if !bech32VerifyChecksum(hrp, decoded) {
|
||||
moreInfo := ""
|
||||
checksum := bech[len(bech)-6:]
|
||||
expected, err := toChars(bech32Checksum(hrp,
|
||||
decoded[:len(decoded)-6]))
|
||||
if err == nil {
|
||||
moreInfo = fmt.Sprintf("Expected %v, got %v.",
|
||||
expected, checksum)
|
||||
// Invalid checksum. Calculate what it should have been, so that the
|
||||
// error contains this information.
|
||||
|
||||
// Extract the payload bytes and actual checksum in the string.
|
||||
actual := bech[len(bech)-6:]
|
||||
payload := decoded[:len(decoded)-6]
|
||||
|
||||
// Calculate the expected checksum, given the hrp and payload data.
|
||||
var expectedBldr strings.Builder
|
||||
expectedBldr.Grow(6)
|
||||
writeBech32Checksum(hrp, payload, &expectedBldr)
|
||||
expected := expectedBldr.String()
|
||||
|
||||
err = ErrInvalidChecksum{
|
||||
Expected: expected,
|
||||
Actual: actual,
|
||||
}
|
||||
return "", nil, fmt.Errorf("checksum failed. " + moreInfo)
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
// We exclude the last 6 bytes, which is the checksum.
|
||||
return hrp, decoded[:len(decoded)-6], nil
|
||||
}
|
||||
|
||||
// Encode encodes a byte slice into a bech32 string with the
|
||||
// human-readable part hrb. Note that the bytes must each encode 5 bits
|
||||
// (base32).
|
||||
// Decode decodes a bech32 encoded string, returning the human-readable part and
|
||||
// the data part excluding the checksum.
|
||||
//
|
||||
// Note that the returned data is 5-bit (base32) encoded and the human-readable
|
||||
// part will be lowercase.
|
||||
func Decode(bech string) (string, []byte, error) {
|
||||
// The maximum allowed length for a bech32 string is 90.
|
||||
if len(bech) > 90 {
|
||||
return "", nil, ErrInvalidLength(len(bech))
|
||||
}
|
||||
|
||||
return DecodeNoLimit(bech)
|
||||
}
|
||||
|
||||
// Encode encodes a byte slice into a bech32 string with the given
|
||||
// human-readable part (HRP). The HRP will be converted to lowercase if needed
|
||||
// since mixed cased encodings are not permitted and lowercase is used for
|
||||
// checksum purposes. Note that the bytes must each encode 5 bits (base32).
|
||||
func Encode(hrp string, data []byte) (string, error) {
|
||||
// Calculate the checksum of the data and append it at the end.
|
||||
checksum := bech32Checksum(hrp, data)
|
||||
combined := append(data, checksum...)
|
||||
// The resulting bech32 string is the concatenation of the lowercase hrp,
|
||||
// the separator 1, data and the 6-byte checksum.
|
||||
hrp = strings.ToLower(hrp)
|
||||
var bldr strings.Builder
|
||||
bldr.Grow(len(hrp) + 1 + len(data) + 6)
|
||||
bldr.WriteString(hrp)
|
||||
bldr.WriteString("1")
|
||||
|
||||
// The resulting bech32 string is the concatenation of the hrp, the
|
||||
// separator 1, data and checksum. Everything after the separator is
|
||||
// represented using the specified charset.
|
||||
dataChars, err := toChars(combined)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to convert data bytes to chars: "+
|
||||
"%v", err)
|
||||
}
|
||||
return hrp + "1" + dataChars, nil
|
||||
}
|
||||
|
||||
// toBytes converts each character in the string 'chars' to the value of the
|
||||
// index of the correspoding character in 'charset'.
|
||||
func toBytes(chars string) ([]byte, error) {
|
||||
decoded := make([]byte, 0, len(chars))
|
||||
for i := 0; i < len(chars); i++ {
|
||||
index := strings.IndexByte(charset, chars[i])
|
||||
if index < 0 {
|
||||
return nil, fmt.Errorf("invalid character not part of "+
|
||||
"charset: %v", chars[i])
|
||||
}
|
||||
decoded = append(decoded, byte(index))
|
||||
}
|
||||
return decoded, nil
|
||||
}
|
||||
|
||||
// toChars converts the byte slice 'data' to a string where each byte in 'data'
|
||||
// encodes the index of a character in 'charset'.
|
||||
func toChars(data []byte) (string, error) {
|
||||
result := make([]byte, 0, len(data))
|
||||
// Write the data part, using the bech32 charset.
|
||||
for _, b := range data {
|
||||
if int(b) >= len(charset) {
|
||||
return "", fmt.Errorf("invalid data byte: %v", b)
|
||||
return "", ErrInvalidDataByte(b)
|
||||
}
|
||||
result = append(result, charset[b])
|
||||
bldr.WriteByte(charset[b])
|
||||
}
|
||||
return string(result), nil
|
||||
|
||||
// Calculate and write the checksum of the data.
|
||||
writeBech32Checksum(hrp, data, &bldr)
|
||||
|
||||
return bldr.String(), nil
|
||||
}
|
||||
|
||||
// ConvertBits converts a byte slice where each byte is encoding fromBits bits,
|
||||
// to a byte slice where each byte is encoding toBits bits.
|
||||
func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) ([]byte, error) {
|
||||
if fromBits < 1 || fromBits > 8 || toBits < 1 || toBits > 8 {
|
||||
return nil, fmt.Errorf("only bit groups between 1 and 8 allowed")
|
||||
return nil, ErrInvalidBitGroups{}
|
||||
}
|
||||
|
||||
// Determine the maximum size the resulting array can have after base
|
||||
// conversion, so that we can size it a single time. This might be off
|
||||
// by a byte depending on whether padding is used or not and if the input
|
||||
// data is a multiple of both fromBits and toBits, but we ignore that and
|
||||
// just size it to the maximum possible.
|
||||
maxSize := len(data)*int(fromBits)/int(toBits) + 1
|
||||
|
||||
// The final bytes, each byte encoding toBits bits.
|
||||
var regrouped []byte
|
||||
regrouped := make([]byte, 0, maxSize)
|
||||
|
||||
// Keep track of the next byte we create and how many bits we have
|
||||
// added to it out of the toBits goal.
|
||||
// added to it out of the toBits goal.
|
||||
nextByte := byte(0)
|
||||
filledBits := uint8(0)
|
||||
|
||||
@ -170,7 +324,7 @@ func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) ([]byte, error)
|
||||
filledBits += toExtract
|
||||
|
||||
// If the nextByte is completely filled, we add it to
|
||||
// our regrouped bytes and start on the next byte.
|
||||
// our regrouped bytes and start on the next byte.
|
||||
if filledBits == toBits {
|
||||
regrouped = append(regrouped, nextByte)
|
||||
filledBits = 0
|
||||
@ -189,64 +343,36 @@ func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) ([]byte, error)
|
||||
|
||||
// Any incomplete group must be <= 4 bits, and all zeroes.
|
||||
if filledBits > 0 && (filledBits > 4 || nextByte != 0) {
|
||||
return nil, fmt.Errorf("invalid incomplete group")
|
||||
return nil, ErrInvalidIncompleteGroup{}
|
||||
}
|
||||
|
||||
return regrouped, nil
|
||||
}
|
||||
|
||||
// For more details on the checksum calculation, please refer to BIP 173.
|
||||
func bech32Checksum(hrp string, data []byte) []byte {
|
||||
// Convert the bytes to list of integers, as this is needed for the
|
||||
// checksum calculation.
|
||||
integers := make([]int, len(data))
|
||||
for i, b := range data {
|
||||
integers[i] = int(b)
|
||||
// EncodeFromBase256 converts a base256-encoded byte slice into a base32-encoded
|
||||
// byte slice and then encodes it into a bech32 string with the given
|
||||
// human-readable part (HRP). The HRP will be converted to lowercase if needed
|
||||
// since mixed cased encodings are not permitted and lowercase is used for
|
||||
// checksum purposes.
|
||||
func EncodeFromBase256(hrp string, data []byte) (string, error) {
|
||||
converted, err := ConvertBits(data, 8, 5, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
values := append(bech32HrpExpand(hrp), integers...)
|
||||
values = append(values, []int{0, 0, 0, 0, 0, 0}...)
|
||||
polymod := bech32Polymod(values) ^ 1
|
||||
var res []byte
|
||||
for i := 0; i < 6; i++ {
|
||||
res = append(res, byte((polymod>>uint(5*(5-i)))&31))
|
||||
}
|
||||
return res
|
||||
return Encode(hrp, converted)
|
||||
}
|
||||
|
||||
// For more details on the polymod calculation, please refer to BIP 173.
|
||||
func bech32Polymod(values []int) int {
|
||||
chk := 1
|
||||
for _, v := range values {
|
||||
b := chk >> 25
|
||||
chk = (chk&0x1ffffff)<<5 ^ v
|
||||
for i := 0; i < 5; i++ {
|
||||
if (b>>uint(i))&1 == 1 {
|
||||
chk ^= gen[i]
|
||||
}
|
||||
}
|
||||
// DecodeToBase256 decodes a bech32-encoded string into its associated
|
||||
// human-readable part (HRP) and base32-encoded data, converts that data to a
|
||||
// base256-encoded byte slice and returns it along with the lowercase HRP.
|
||||
func DecodeToBase256(bech string) (string, []byte, error) {
|
||||
hrp, data, err := Decode(bech)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return chk
|
||||
}
|
||||
|
||||
// For more details on HRP expansion, please refer to BIP 173.
|
||||
func bech32HrpExpand(hrp string) []int {
|
||||
v := make([]int, 0, len(hrp)*2+1)
|
||||
for i := 0; i < len(hrp); i++ {
|
||||
v = append(v, int(hrp[i]>>5))
|
||||
}
|
||||
v = append(v, 0)
|
||||
for i := 0; i < len(hrp); i++ {
|
||||
v = append(v, int(hrp[i]&31))
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// For more details on the checksum verification, please refer to BIP 173.
|
||||
func bech32VerifyChecksum(hrp string, data []byte) bool {
|
||||
integers := make([]int, len(data))
|
||||
for i, b := range data {
|
||||
integers[i] = int(b)
|
||||
}
|
||||
concat := append(bech32HrpExpand(hrp), integers...)
|
||||
return bech32Polymod(concat) == 1
|
||||
converted, err := ConvertBits(data, 5, 8, false)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return hrp, converted, nil
|
||||
}
|
||||
|
85
vendor/github.com/btcsuite/btcutil/bech32/error.go
generated
vendored
Normal file
85
vendor/github.com/btcsuite/btcutil/bech32/error.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
// Copyright (c) 2019 The Decred developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bech32
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrMixedCase is returned when the bech32 string has both lower and uppercase
|
||||
// characters.
|
||||
type ErrMixedCase struct{}
|
||||
|
||||
func (e ErrMixedCase) Error() string {
|
||||
return "string not all lowercase or all uppercase"
|
||||
}
|
||||
|
||||
// ErrInvalidBitGroups is returned when conversion is attempted between byte
|
||||
// slices using bit-per-element of unsupported value.
|
||||
type ErrInvalidBitGroups struct{}
|
||||
|
||||
func (e ErrInvalidBitGroups) Error() string {
|
||||
return "only bit groups between 1 and 8 allowed"
|
||||
}
|
||||
|
||||
// ErrInvalidIncompleteGroup is returned when then byte slice used as input has
|
||||
// data of wrong length.
|
||||
type ErrInvalidIncompleteGroup struct{}
|
||||
|
||||
func (e ErrInvalidIncompleteGroup) Error() string {
|
||||
return "invalid incomplete group"
|
||||
}
|
||||
|
||||
// ErrInvalidLength is returned when the bech32 string has an invalid length
|
||||
// given the BIP-173 defined restrictions.
|
||||
type ErrInvalidLength int
|
||||
|
||||
func (e ErrInvalidLength) Error() string {
|
||||
return fmt.Sprintf("invalid bech32 string length %d", int(e))
|
||||
}
|
||||
|
||||
// ErrInvalidCharacter is returned when the bech32 string has a character
|
||||
// outside the range of the supported charset.
|
||||
type ErrInvalidCharacter rune
|
||||
|
||||
func (e ErrInvalidCharacter) Error() string {
|
||||
return fmt.Sprintf("invalid character in string: '%c'", rune(e))
|
||||
}
|
||||
|
||||
// ErrInvalidSeparatorIndex is returned when the separator character '1' is
|
||||
// in an invalid position in the bech32 string.
|
||||
type ErrInvalidSeparatorIndex int
|
||||
|
||||
func (e ErrInvalidSeparatorIndex) Error() string {
|
||||
return fmt.Sprintf("invalid separator index %d", int(e))
|
||||
}
|
||||
|
||||
// ErrNonCharsetChar is returned when a character outside of the specific
|
||||
// bech32 charset is used in the string.
|
||||
type ErrNonCharsetChar rune
|
||||
|
||||
func (e ErrNonCharsetChar) Error() string {
|
||||
return fmt.Sprintf("invalid character not part of charset: %v", int(e))
|
||||
}
|
||||
|
||||
// ErrInvalidChecksum is returned when the extracted checksum of the string
|
||||
// is different than what was expected.
|
||||
type ErrInvalidChecksum struct {
|
||||
Expected string
|
||||
Actual string
|
||||
}
|
||||
|
||||
func (e ErrInvalidChecksum) Error() string {
|
||||
return fmt.Sprintf("invalid checksum (expected %v got %v)",
|
||||
e.Expected, e.Actual)
|
||||
}
|
||||
|
||||
// ErrInvalidDataByte is returned when a byte outside the range required for
|
||||
// conversion into a string was found.
|
||||
type ErrInvalidDataByte byte
|
||||
|
||||
func (e ErrInvalidDataByte) Error() string {
|
||||
return fmt.Sprintf("invalid data byte: %v", byte(e))
|
||||
}
|
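Note: because these errors are concrete value types rather than ad-hoc fmt.Errorf strings, callers can branch on them; a hedged sketch of that pattern:

package main

import (
	"fmt"
	"strings"

	"github.com/btcsuite/btcutil/bech32"
)

func main() {
	// 91 characters: longer than the BIP-173 limit enforced by Decode.
	tooLong := "bc1" + strings.Repeat("q", 88)

	_, _, err := bech32.Decode(tooLong)
	switch e := err.(type) {
	case bech32.ErrInvalidLength:
		fmt.Println("invalid length:", int(e))
	case bech32.ErrInvalidCharacter:
		fmt.Println("bad character:", rune(e))
	default:
		fmt.Println(err)
	}
}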
2  vendor/github.com/btcsuite/btcutil/block.go  generated  vendored
@ -114,7 +114,7 @@ func (b *Block) Hash() *chainhash.Hash {
func (b *Block) Tx(txNum int) (*Tx, error) {
// Ensure the requested transaction is in range.
numTx := uint64(len(b.msgBlock.Transactions))
if txNum < 0 || uint64(txNum) > numTx {
if txNum < 0 || uint64(txNum) >= numTx {
str := fmt.Sprintf("transaction index %d is out of range - max %d",
txNum, numTx-1)
return nil, OutOfRangeError(str)
2  vendor/github.com/btcsuite/btcutil/certgen.go  generated  vendored
@ -110,7 +110,7 @@ func NewTLSCertPair(organization string, validUntil time.Time, extraHosts []stri

KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature |
x509.KeyUsageCertSign,
IsCA: true, // so can sign self.
IsCA: true, // so can sign self.
BasicConstraintsValid: true,

DNSNames: dnsNames,
11  vendor/github.com/btcsuite/btcutil/go.mod  generated  vendored  Normal file
@ -0,0 +1,11 @@
module github.com/btcsuite/btcutil

go 1.14

require (
github.com/aead/siphash v1.0.1
github.com/btcsuite/btcd v0.20.1-beta
github.com/davecgh/go-spew v1.1.0
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d
)
59  vendor/github.com/btcsuite/btcutil/go.sum  generated  vendored  Normal file
@ -0,0 +1,59 @@
|
||||
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd h1:qdGvebPBDuYDPGi1WCPjy1tGyMpmDK8IEapSsszn7HE=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723 h1:ZA/jbKoGcVAnER6pCHPEkGdZOV7U1oLUedErBHCUMs0=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495 h1:6IyqGr3fnd0tM3YxipK27TUskaOVUjU2nG45yzwcQKY=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44 h1:9lP3x0pW80sDI6t1UMSLA4to18W7R7imwAI/sWS9S8Q=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d h1:2+ZP7EfsZV7Vvmx3TIqSlSzATMkTAKqM14YGFPoSKjI=
|
||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
58  vendor/github.com/btcsuite/btcutil/goclean.sh  generated  vendored
@ -9,40 +9,38 @@
|
||||
# 7. race detector (http://blog.golang.org/race-detector)
|
||||
# 8. test coverage (http://blog.golang.org/cover)
|
||||
#
|
||||
# gometalint (github.com/alecthomas/gometalinter) is used to run each each
|
||||
# static checker.
|
||||
|
||||
set -ex
|
||||
|
||||
# Automatic checks
|
||||
test -z "$(gometalinter --disable-all \
|
||||
--enable=gofmt \
|
||||
--enable=goimports \
|
||||
--enable=golint \
|
||||
--enable=vet \
|
||||
--enable=gosimple \
|
||||
--enable=unconvert \
|
||||
--deadline=120s ./... | grep -v 'ExampleNew' 2>&1 | tee /dev/stderr)"
|
||||
env GORACE="halt_on_error=1" go test -race ./...
|
||||
for i in $(find . -name go.mod -type f -print); do
|
||||
module=$(dirname ${i})
|
||||
echo "==> ${module}"
|
||||
|
||||
# Run test coverage on each subdirectories and merge the coverage profile.
|
||||
|
||||
echo "mode: count" > profile.cov
|
||||
|
||||
# Standard go tooling behavior is to ignore dirs with leading underscores.
|
||||
for dir in $(find . -maxdepth 10 -not -path './.git*' -not -path '*/_*' -type d);
|
||||
do
|
||||
if ls $dir/*.go &> /dev/null; then
|
||||
go test -covermode=count -coverprofile=$dir/profile.tmp $dir
|
||||
if [ -f $dir/profile.tmp ]; then
|
||||
cat $dir/profile.tmp | tail -n +2 >> profile.cov
|
||||
rm $dir/profile.tmp
|
||||
MODNAME=$(echo $module | sed -E -e "s/^$ROOTPATHPATTERN//" \
|
||||
-e 's,^/,,' -e 's,/v[0-9]+$,,')
|
||||
if [ -z "$MODNAME" ]; then
|
||||
MODNAME=.
|
||||
fi
|
||||
fi
|
||||
|
||||
# run tests
|
||||
(cd $MODNAME &&
|
||||
echo "mode: atomic" > profile.cov && \
|
||||
env GORACE=halt_on_error=1 go test -race -covermode=atomic -coverprofile=profile.tmp ./... && \
|
||||
cat profile.tmp | tail -n +2 >> profile.cov && \
|
||||
rm profile.tmp && \
|
||||
go tool cover -func profile.cov
|
||||
)
|
||||
|
||||
# check linters
|
||||
(cd $MODNAME && \
|
||||
go mod download && \
|
||||
golangci-lint run --deadline=10m --disable-all \
|
||||
--enable=gofmt \
|
||||
--enable=goimports \
|
||||
--enable=golint \
|
||||
--enable=govet \
|
||||
--enable=gosimple \
|
||||
--enable=unconvert
|
||||
)
|
||||
done
|
||||
|
||||
go tool cover -func profile.cov
|
||||
|
||||
# To submit the test coverage result to coveralls.io,
|
||||
# use goveralls (https://github.com/mattn/goveralls)
|
||||
# goveralls -coverprofile=profile.cov -service=travis-ci
|
||||
|
2  vendor/github.com/btcsuite/btcutil/tx.go  generated  vendored
@ -70,7 +70,7 @@ func (t *Tx) WitnessHash() *chainhash.Hash {
// HasWitness on the underlying wire.MsgTx, however it caches the result so
// subsequent calls are more efficient.
func (t *Tx) HasWitness() bool {
if t.txHashWitness != nil {
if t.txHasWitness != nil {
return *t.txHasWitness
}

84  vendor/github.com/elastic/gosigar/.appveyor.yml  generated  vendored
@ -1,84 +0,0 @@
|
||||
# Version format
|
||||
version: "{build}"
|
||||
|
||||
# Operating system (build VM template)
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
# Environment variables
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
GVM_GO_VERSION: 1.8.3
|
||||
GVM_DL: https://github.com/andrewkroh/gvm/releases/download/v0.0.1/gvm-windows-amd64.exe
|
||||
|
||||
# Custom clone folder (variables are not expanded here).
|
||||
clone_folder: c:\gopath\src\github.com\elastic\gosigar
|
||||
|
||||
# Cache mingw install until appveyor.yml is modified.
|
||||
cache:
|
||||
- C:\ProgramData\chocolatey\bin -> .appveyor.yml
|
||||
- C:\ProgramData\chocolatey\lib -> .appveyor.yml
|
||||
- C:\Users\appveyor\.gvm -> .appveyor.yml
|
||||
- C:\Windows\System32\gvm.exe -> .appveyor.yml
|
||||
- C:\tools\mingw64 -> .appveyor.yml
|
||||
|
||||
# Scripts that run after cloning repository
|
||||
install:
|
||||
- ps: >-
|
||||
if(!(Test-Path "C:\Windows\System32\gvm.exe")) {
|
||||
wget "$env:GVM_DL" -Outfile C:\Windows\System32\gvm.exe
|
||||
}
|
||||
- ps: gvm --format=powershell "$env:GVM_GO_VERSION" | Invoke-Expression
|
||||
# AppVeyor installed mingw is 32-bit only so install 64-bit version.
|
||||
- ps: >-
|
||||
if(!(Test-Path "C:\tools\mingw64\bin\gcc.exe")) {
|
||||
cinst mingw > mingw-install.txt
|
||||
Push-AppveyorArtifact mingw-install.txt
|
||||
}
|
||||
- set PATH=C:\tools\mingw64\bin;%GOROOT%\bin;%PATH%
|
||||
- set PATH=%GOPATH%\bin;%PATH%
|
||||
- go version
|
||||
- go env
|
||||
- python --version
|
||||
- go get github.com/elastic/beats/vendor/github.com/pierrre/gotestcover
|
||||
|
||||
# To run your custom scripts instead of automatic MSBuild
|
||||
build_script:
|
||||
# Compile
|
||||
- appveyor AddCompilationMessage "Starting Compile"
|
||||
- cd c:\gopath\src\github.com\elastic\gosigar
|
||||
- go get -v -t -d ./...
|
||||
- go build
|
||||
- go build -o examples/df/df.exe ./examples/df
|
||||
- go build -o examples/free/free.exe ./examples/free
|
||||
- go build -o examples/ps/ps.exe ./examples/ps
|
||||
- go build -o examples/uptime/uptime.exe ./examples/uptime
|
||||
- appveyor AddCompilationMessage "Compile Success"
|
||||
|
||||
# To run your custom scripts instead of automatic tests
|
||||
test_script:
|
||||
# Unit tests
|
||||
- ps: Add-AppveyorTest "Unit Tests" -Outcome Running
|
||||
- mkdir build\coverage
|
||||
- gotestcover -v -coverprofile=build/coverage/unit.cov github.com/elastic/gosigar/...
|
||||
- ps: Update-AppveyorTest "Unit Tests" -Outcome Passed
|
||||
- ps: Add-AppveyorTest "Running Examples" -Outcome Running
|
||||
- .\examples\df\df.exe
|
||||
- .\examples\free\free.exe
|
||||
- .\examples\ps\ps.exe
|
||||
- .\examples\uptime\uptime.exe
|
||||
- ps: Update-AppveyorTest "Running Examples" -Outcome Passed
|
||||
|
||||
after_test:
|
||||
- go tool cover -html=build\coverage\unit.cov -o build\coverage\unit.html
|
||||
- ps: Push-AppveyorArtifact build\coverage\unit.cov
|
||||
- ps: Push-AppveyorArtifact build\coverage\unit.html
|
||||
# Upload coverage report.
|
||||
- "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%"
|
||||
- pip install codecov
|
||||
- codecov -X gcov -f "build\coverage\unit.cov"
|
||||
|
||||
# To disable deployment
|
||||
deploy: off
|
||||
|
||||
# Notifications should only be setup using the AppVeyor UI so that
|
||||
# forks can be created without inheriting the settings.
|
41  vendor/github.com/elastic/gosigar/.gitignore  generated  vendored
@ -1,41 +0,0 @@
|
||||
# Directories
|
||||
/.vagrant
|
||||
/.idea
|
||||
/build
|
||||
|
||||
# Files
|
||||
.DS_Store
|
||||
/*.iml
|
||||
*.h
|
||||
|
||||
# Editor swap files
|
||||
*.swp
|
||||
*.swo
|
||||
*.swn
|
||||
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
*.pyc
|
||||
*.swp
|
||||
|
||||
# Example binaries
|
||||
examples/df/df
|
||||
examples/df/df.exe
|
||||
examples/free/free
|
||||
examples/free/free.exe
|
||||
examples/ps/ps
|
||||
examples/ps/ps.exe
|
||||
examples/ss/ss
|
||||
examples/ss/ss.exe
|
||||
examples/uptime/uptime
|
||||
examples/uptime/uptime.exe
|
||||
|
||||
# Test Data
|
||||
cgroup/testdata/*
|
||||
!cgroup/testdata/*.zip
|
||||
|
37  vendor/github.com/elastic/gosigar/.travis.yml  generated  vendored
@ -1,37 +0,0 @@
|
||||
language: go
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
go:
|
||||
- 1.8.x
|
||||
- 1.10.x
|
||||
|
||||
env:
|
||||
global:
|
||||
- PROJ="github.com/elastic/gosigar"
|
||||
|
||||
sudo: false
|
||||
|
||||
before_install:
|
||||
# Put project into proper GOPATH location (important for forks).
|
||||
- mkdir -p $HOME/gopath/src/${PROJ}
|
||||
- rsync -az ${TRAVIS_BUILD_DIR}/ $HOME/gopath/src/${PROJ}/
|
||||
- export TRAVIS_BUILD_DIR=$HOME/gopath/src/${PROJ}
|
||||
- cd $HOME/gopath/src/${PROJ}
|
||||
|
||||
install:
|
||||
- go get -v -t -d ./...
|
||||
- go get github.com/elastic/beats/vendor/github.com/pierrre/gotestcover
|
||||
|
||||
script:
|
||||
- gofmt -l . | read && echo "Code differs from gofmt's style. Run 'gofmt -w .'" 1>&2 && exit 1 || true
|
||||
- go vet
|
||||
- go build
|
||||
- mkdir -p build/coverage
|
||||
- gotestcover -v -coverprofile=build/coverage/unit.cov github.com/elastic/gosigar/...
|
||||
- for i in $(ls examples); do go build -o examples/$i/$i ./examples/$i; ./examples/$i/$i; done
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash) -f build/coverage/unit.cov
|
141  vendor/github.com/elastic/gosigar/CHANGELOG.md  generated  vendored
@ -1,141 +0,0 @@
|
||||
# Change Log
|
||||
All notable changes to this project will be documented in this file.
|
||||
This project adheres to [Semantic Versioning](http://semver.org/).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
### Added
|
||||
|
||||
### Fixed
|
||||
|
||||
### Changed
|
||||
|
||||
### Deprecated
|
||||
|
||||
## [0.10.4]
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed a crash when splitting command-line arguments under Windows. #124
|
||||
|
||||
## [0.10.3]
|
||||
|
||||
### Fixed
|
||||
- ProcState.Get() doesn't fail under Windows when it cannot obtain process ownership information. #121
|
||||
|
||||
## [0.10.2]
|
||||
|
||||
### Fixed
|
||||
- Fix memory leak when getting process arguments. #119
|
||||
|
||||
## [0.10.1]
|
||||
|
||||
### Fixed
|
||||
- Replaced the WMI queries with win32 apis due to high CPU usage. #116
|
||||
|
||||
## [0.10.0]
|
||||
|
||||
### Added
|
||||
- List filesystems on Windows that have an access path but not an assigned letter. #112
|
||||
|
||||
### Fixed
- Added missing runtime import for FreeBSD. #104
- Handle nil command line in Windows processes. #110

## [0.9.0]

### Added
- Added support for huge TLB pages on Linux #97
- Added support for big endian platforms #100

### Fixed
- Add missing method for OpenBSD #99

## [0.8.0]

### Added
- Added partial `getrusage` support for Windows to retrieve system CPU time and user CPU time. #95
- Added full `getrusage` support for Unix. #95

## [0.7.0]

### Added
- Added method stubs for process handling for operating systems that are not supported
  by gosigar. All methods return `ErrNotImplemented` on such systems. #88

### Fixed
- Fix freebsd build by using the common version of Get(pid). #91

### Changed
- Fixed issues in cgroup package by adding missing error checks and closing
  file handles. #92

## [0.6.0]

### Added
- Added method stubs to enable compilation for operating systems that are not
  supported by gosigar. All methods return `ErrNotImplemented` on these unsupported
  operating systems. #83
- FreeBSD returns `ErrNotImplemented` for `ProcTime.Get`. #83

### Changed
- OpenBSD returns `ErrNotImplemented` for `ProcTime.Get` instead of `nil`. #83
- Fixed incorrect `Mem.Used` calculation under linux. #82
- Fixed `ProcState` on Linux and FreeBSD when process names contain parentheses. #81

### Removed
- Remove NetBSD build from sigar_unix.go as it is not supported by gosigar. #83

## [0.5.0]

### Changed
- Fixed Trim environment variables when comparing values in the test suite. #79
- Make `kern_procargs` more robust under darwin when we cannot retrieve
  all the information about a process. #78

## [0.4.0]

### Changed
- Fixed Windows issue that caused a hang during `init()` if WMI wasn't ready. #74

## [0.3.0]

### Added
- Read `MemAvailable` value for kernel 3.14+ #71

## [0.2.0]

### Added
- Added `ErrCgroupsMissing` to indicate that /proc/cgroups is missing, which is
  an indicator that cgroups were disabled at compile time. #64

### Changed
- Changed `cgroup.SupportedSubsystems()` to honor the "enabled" column in the
  /proc/cgroups file. #64

## [0.1.0]

### Added
- Added `CpuList` implementation for Windows that returns CPU timing information
  on a per CPU basis. #55
- Added `Uptime` implementation for Windows. #55
- Added `Swap` implementation for Windows based on page file metrics. #55
- Added support to `github.com/gosigar/sys/windows` for querying and enabling
  privileges in a process token.
- Added utility code for interfacing with linux NETLINK_INET_DIAG. #60
- Added `ProcEnv` for getting a process's environment variables. #61

### Changed
- Changed several `OpenProcess` calls on Windows to request the lowest possible
  access privileges. #50
- Removed cgo usage from Windows code.
- Added OS version checks to `ProcArgs.Get` on Windows because the
  `Win32_Process` WMI query is not available prior to Windows Vista. On XP and
  Windows 2003, this method returns `ErrNotImplemented`. #55

### Fixed
- Fixed value of `Mem.ActualFree` and `Mem.ActualUsed` on Windows. #49
- Fixed `ProcTime.StartTime` on Windows to report value in milliseconds since
  Unix epoch. #51
- Fixed incorrect `ProcStatus.PPID` value on Windows. #55
- Fixed `ProcStatus.Username` error on Windows XP #56
201
vendor/github.com/elastic/gosigar/LICENSE
generated
vendored
@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
9
vendor/github.com/elastic/gosigar/NOTICE
generated
vendored
@ -1,9 +0,0 @@
Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved.

This product is licensed to you under the Apache License, Version 2.0 (the "License").
You may not use this product except in compliance with the License.

This product includes a number of subcomponents with
separate copyright notices and license terms. Your use of these
subcomponents is subject to the terms and conditions of the
subcomponent's license, as noted in the LICENSE file.
58
vendor/github.com/elastic/gosigar/README.md
generated
vendored
@ -1,58 +0,0 @@
# Go sigar [](https://travis-ci.org/elastic/gosigar) [](https://ci.appveyor.com/project/elastic-beats/gosigar/branch/master)

## Overview

Go sigar is a golang implementation of the
[sigar API](https://github.com/hyperic/sigar). The Go version of
sigar has a very similar interface, but is being written from scratch
in pure go/cgo, rather than cgo bindings for libsigar.

## Test drive

    $ go get github.com/elastic/gosigar
    $ cd $GOPATH/src/github.com/elastic/gosigar/examples/ps
    $ go build
    $ ./ps

## Supported platforms

The features vary by operating system.

| Feature         | Linux | Darwin | Windows | OpenBSD | FreeBSD |
|-----------------|:-----:|:------:|:-------:|:-------:|:-------:|
| Cpu             | X     | X      | X       | X       | X       |
| CpuList         | X     | X      |         | X       | X       |
| FDUsage         | X     |        |         |         | X       |
| FileSystemList  | X     | X      | X       | X       | X       |
| FileSystemUsage | X     | X      | X       | X       | X       |
| HugeTLBPages    | X     |        |         |         |         |
| LoadAverage     | X     | X      |         | X       | X       |
| Mem             | X     | X      | X       | X       | X       |
| ProcArgs        | X     | X      | X       |         | X       |
| ProcEnv         | X     | X      |         |         | X       |
| ProcExe         | X     | X      |         |         | X       |
| ProcFDUsage     | X     |        |         |         | X       |
| ProcList        | X     | X      | X       |         | X       |
| ProcMem         | X     | X      | X       |         | X       |
| ProcState       | X     | X      | X       |         | X       |
| ProcTime        | X     | X      | X       |         | X       |
| Swap            | X     | X      |         | X       | X       |
| Uptime          | X     | X      |         | X       | X       |

## OS Specific Notes

### FreeBSD

Mount both `linprocfs` and `procfs` for compatibility. Consider adding these
mounts to your `/etc/fstab` file so they are mounted automatically at boot.

```
sudo mount -t procfs proc /proc
sudo mkdir -p /compat/linux/proc
sudo mount -t linprocfs /dev/null /compat/linux/proc
```

## License

Apache 2.0
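The README above only points at the bundled `ps` example binary. As a minimal sketch of the library's typical call pattern (assuming the vendored package is imported as `github.com/elastic/gosigar`; the `main` wrapper is illustrative only), each metric is a struct whose `Get` method fills in its fields in place:

```go
package main

import (
	"fmt"

	"github.com/elastic/gosigar"
)

func main() {
	// Mem.Get populates Total, Used, Free, ActualFree and ActualUsed.
	mem := gosigar.Mem{}
	if err := mem.Get(); err != nil {
		// On platforms without support this is typically ErrNotImplemented.
		fmt.Println("mem:", err)
		return
	}
	fmt.Printf("total=%d used=%d free=%d\n", mem.Total, mem.Used, mem.Free)

	// LoadAverage.Get populates the 1/5/15 minute averages.
	avg := gosigar.LoadAverage{}
	if err := avg.Get(); err == nil {
		fmt.Printf("load: %.2f %.2f %.2f\n", avg.One, avg.Five, avg.Fifteen)
	}
}
```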
25
vendor/github.com/elastic/gosigar/Vagrantfile
generated
vendored
@ -1,25 +0,0 @@
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "hashicorp/precise64"
  config.vm.provision "shell", inline: "mkdir -p /home/vagrant/go"
  config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/cloudfoundry/gosigar"
  config.vm.provision "shell", inline: "chown -R vagrant:vagrant /home/vagrant/go"
  install_go = <<-BASH
  set -e

  if [ ! -d "/usr/local/go" ]; then
    cd /tmp && wget https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz
    cd /usr/local
    tar xvzf /tmp/go1.3.3.linux-amd64.tar.gz
    echo 'export GOPATH=/home/vagrant/go; export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin' >> /home/vagrant/.bashrc
  fi
  export GOPATH=/home/vagrant/go
  export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin
  /usr/local/go/bin/go get -u github.com/onsi/ginkgo/ginkgo
  /usr/local/go/bin/go get -u github.com/onsi/gomega;
  BASH
  config.vm.provision "shell", inline: 'apt-get install -y git-core'
  config.vm.provision "shell", inline: install_go
end
21
vendor/github.com/elastic/gosigar/codecov.yml
generated
vendored
@ -1,21 +0,0 @@
# Enable coverage report message for diff on commit
coverage:
  status:
    project: off
    patch:
      default:
        # basic
        target: auto
        threshold: null
        base: auto
        # advanced
        branches: null
        if_no_uploads: error
        if_not_found: success
        if_ci_failed: error
        only_pulls: false
        flags: null
        paths: null

# Disable comments on Pull Requests
comment: false
89
vendor/github.com/elastic/gosigar/concrete_sigar.go
generated
vendored
@ -1,89 +0,0 @@
package gosigar

import (
	"time"
)

type ConcreteSigar struct{}

func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) {
	// samplesCh is buffered to 1 value to immediately return first CPU sample
	samplesCh := make(chan Cpu, 1)

	stopCh := make(chan struct{})

	go func() {
		var cpuUsage Cpu

		// Immediately provide non-delta value.
		// samplesCh is buffered to 1 value, so it will not block.
		cpuUsage.Get()
		samplesCh <- cpuUsage

		ticker := time.NewTicker(collectionInterval)

		for {
			select {
			case <-ticker.C:
				previousCpuUsage := cpuUsage

				cpuUsage.Get()

				select {
				case samplesCh <- cpuUsage.Delta(previousCpuUsage):
				default:
					// Include default to avoid channel blocking
				}

			case <-stopCh:
				return
			}
		}
	}()

	return samplesCh, stopCh
}

func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) {
	l := LoadAverage{}
	err := l.Get()
	return l, err
}

func (c *ConcreteSigar) GetMem() (Mem, error) {
	m := Mem{}
	err := m.Get()
	return m, err
}

func (c *ConcreteSigar) GetSwap() (Swap, error) {
	s := Swap{}
	err := s.Get()
	return s, err
}

func (c *ConcreteSigar) GetHugeTLBPages() (HugeTLBPages, error) {
	p := HugeTLBPages{}
	err := p.Get()
	return p, err
}

func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) {
	f := FileSystemUsage{}
	err := f.Get(path)
	return f, err
}

func (c *ConcreteSigar) GetFDUsage() (FDUsage, error) {
	fd := FDUsage{}
	err := fd.Get()
	return fd, err
}

// GetRusage returns the resource usage of the process.
// Possible params: 0 = RUSAGE_SELF, 1 = RUSAGE_CHILDREN, 2 = RUSAGE_THREAD
func (c *ConcreteSigar) GetRusage(who int) (Rusage, error) {
	r := Rusage{}
	err := r.Get(who)
	return r, err
}
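For reference, a hedged sketch of how the `CollectCpuStats` method above is typically consumed: the first value on the returned channel is an absolute CPU sample, every later value is a delta over the collection interval, and closing the stop channel ends the sampling goroutine. The `main` wrapper and the one-second interval are illustrative assumptions, not part of the vendored code.

```go
package main

import (
	"fmt"
	"time"

	"github.com/elastic/gosigar"
)

func main() {
	s := &gosigar.ConcreteSigar{}

	// samples delivers one absolute reading, then per-interval deltas.
	samples, stop := s.CollectCpuStats(1 * time.Second)

	for i := 0; i < 3; i++ {
		cpu := <-samples
		fmt.Printf("user=%d sys=%d idle=%d total=%d\n",
			cpu.User, cpu.Sys, cpu.Idle, cpu.Total())
	}

	// Closing the stop channel terminates the sampling goroutine.
	close(stop)
}
```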
498
vendor/github.com/elastic/gosigar/sigar_darwin.go
generated
vendored
@ -1,498 +0,0 @@
|
||||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package gosigar
|
||||
|
||||
/*
|
||||
#include <stdlib.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/mount.h>
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach/mach_host.h>
|
||||
#include <mach/host_info.h>
|
||||
#include <libproc.h>
|
||||
#include <mach/processor_info.h>
|
||||
#include <mach/vm_map.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/user"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func (self *LoadAverage) Get() error {
|
||||
avg := []C.double{0, 0, 0}
|
||||
|
||||
C.getloadavg(&avg[0], C.int(len(avg)))
|
||||
|
||||
self.One = float64(avg[0])
|
||||
self.Five = float64(avg[1])
|
||||
self.Fifteen = float64(avg[2])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Uptime) Get() error {
|
||||
tv := syscall.Timeval32{}
|
||||
|
||||
if err := sysctlbyname("kern.boottime", &tv); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Mem) Get() error {
|
||||
var vmstat C.vm_statistics_data_t
|
||||
|
||||
if err := sysctlbyname("hw.memsize", &self.Total); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := vm_info(&vmstat); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
kern := uint64(vmstat.inactive_count) << 12
|
||||
self.Free = uint64(vmstat.free_count) << 12
|
||||
|
||||
self.Used = self.Total - self.Free
|
||||
self.ActualFree = self.Free + kern
|
||||
self.ActualUsed = self.Used - kern
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type xsw_usage struct {
|
||||
Total, Avail, Used uint64
|
||||
}
|
||||
|
||||
func (self *Swap) Get() error {
|
||||
sw_usage := xsw_usage{}
|
||||
|
||||
if err := sysctlbyname("vm.swapusage", &sw_usage); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Total = sw_usage.Total
|
||||
self.Used = sw_usage.Used
|
||||
self.Free = sw_usage.Avail
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *HugeTLBPages) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *Cpu) Get() error {
|
||||
var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT
|
||||
var cpuload C.host_cpu_load_info_data_t
|
||||
|
||||
status := C.host_statistics(C.host_t(C.mach_host_self()),
|
||||
C.HOST_CPU_LOAD_INFO,
|
||||
C.host_info_t(unsafe.Pointer(&cpuload)),
|
||||
&count)
|
||||
|
||||
if status != C.KERN_SUCCESS {
|
||||
return fmt.Errorf("host_statistics error=%d", status)
|
||||
}
|
||||
|
||||
self.User = uint64(cpuload.cpu_ticks[C.CPU_STATE_USER])
|
||||
self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM])
|
||||
self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE])
|
||||
self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *CpuList) Get() error {
|
||||
var count C.mach_msg_type_number_t
|
||||
var cpuload *C.processor_cpu_load_info_data_t
|
||||
var ncpu C.natural_t
|
||||
|
||||
status := C.host_processor_info(C.host_t(C.mach_host_self()),
|
||||
C.PROCESSOR_CPU_LOAD_INFO,
|
||||
&ncpu,
|
||||
(*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
|
||||
&count)
|
||||
|
||||
if status != C.KERN_SUCCESS {
|
||||
return fmt.Errorf("host_processor_info error=%d", status)
|
||||
}
|
||||
|
||||
// jump through some cgo casting hoops and ensure we properly free
|
||||
// the memory that cpuload points to
|
||||
target := C.vm_map_t(C.mach_task_self_)
|
||||
address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
|
||||
defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))
|
||||
|
||||
// the body of struct processor_cpu_load_info
|
||||
// aka processor_cpu_load_info_data_t
|
||||
var cpu_ticks [C.CPU_STATE_MAX]uint32
|
||||
|
||||
// copy the cpuload array to a []byte buffer
|
||||
// where we can binary.Read the data
|
||||
size := int(ncpu) * binary.Size(cpu_ticks)
|
||||
buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size))
|
||||
|
||||
bbuf := bytes.NewBuffer(buf)
|
||||
|
||||
self.List = make([]Cpu, 0, ncpu)
|
||||
|
||||
for i := 0; i < int(ncpu); i++ {
|
||||
cpu := Cpu{}
|
||||
|
||||
err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER])
|
||||
cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM])
|
||||
cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE])
|
||||
cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE])
|
||||
|
||||
self.List = append(self.List, cpu)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *FDUsage) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *FileSystemList) Get() error {
|
||||
num, err := syscall.Getfsstat(nil, C.MNT_NOWAIT)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := make([]syscall.Statfs_t, num)
|
||||
|
||||
_, err = syscall.Getfsstat(buf, C.MNT_NOWAIT)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fslist := make([]FileSystem, 0, num)
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
fs := FileSystem{}
|
||||
|
||||
fs.DirName = bytePtrToString(&buf[i].Mntonname[0])
|
||||
fs.DevName = bytePtrToString(&buf[i].Mntfromname[0])
|
||||
fs.SysTypeName = bytePtrToString(&buf[i].Fstypename[0])
|
||||
|
||||
fslist = append(fslist, fs)
|
||||
}
|
||||
|
||||
self.List = fslist
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (self *ProcList) Get() error {
|
||||
n := C.proc_listpids(C.PROC_ALL_PIDS, 0, nil, 0)
|
||||
if n <= 0 {
|
||||
return syscall.EINVAL
|
||||
}
|
||||
buf := make([]byte, n)
|
||||
n = C.proc_listpids(C.PROC_ALL_PIDS, 0, unsafe.Pointer(&buf[0]), n)
|
||||
if n <= 0 {
|
||||
return syscall.ENOMEM
|
||||
}
|
||||
|
||||
var pid int32
|
||||
num := int(n) / binary.Size(pid)
|
||||
list := make([]int, 0, num)
|
||||
bbuf := bytes.NewBuffer(buf)
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
if err := binary.Read(bbuf, binary.LittleEndian, &pid); err != nil {
|
||||
return err
|
||||
}
|
||||
if pid == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
list = append(list, int(pid))
|
||||
}
|
||||
|
||||
self.List = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcState) Get(pid int) error {
|
||||
info := C.struct_proc_taskallinfo{}
|
||||
|
||||
if err := task_info(pid, &info); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Name = C.GoString(&info.pbsd.pbi_comm[0])
|
||||
|
||||
switch info.pbsd.pbi_status {
|
||||
case C.SIDL:
|
||||
self.State = RunStateIdle
|
||||
case C.SRUN:
|
||||
self.State = RunStateRun
|
||||
case C.SSLEEP:
|
||||
self.State = RunStateSleep
|
||||
case C.SSTOP:
|
||||
self.State = RunStateStop
|
||||
case C.SZOMB:
|
||||
self.State = RunStateZombie
|
||||
default:
|
||||
self.State = RunStateUnknown
|
||||
}
|
||||
|
||||
self.Ppid = int(info.pbsd.pbi_ppid)
|
||||
|
||||
self.Pgid = int(info.pbsd.pbi_pgid)
|
||||
|
||||
self.Tty = int(info.pbsd.e_tdev)
|
||||
|
||||
self.Priority = int(info.ptinfo.pti_priority)
|
||||
|
||||
self.Nice = int(info.pbsd.pbi_nice)
|
||||
|
||||
// Get process username. Fallback to UID if username is not available.
|
||||
uid := strconv.Itoa(int(info.pbsd.pbi_uid))
|
||||
user, err := user.LookupId(uid)
|
||||
if err == nil && user.Username != "" {
|
||||
self.Username = user.Username
|
||||
} else {
|
||||
self.Username = uid
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcMem) Get(pid int) error {
|
||||
info := C.struct_proc_taskallinfo{}
|
||||
|
||||
if err := task_info(pid, &info); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Size = uint64(info.ptinfo.pti_virtual_size)
|
||||
self.Resident = uint64(info.ptinfo.pti_resident_size)
|
||||
self.PageFaults = uint64(info.ptinfo.pti_faults)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcTime) Get(pid int) error {
|
||||
info := C.struct_proc_taskallinfo{}
|
||||
|
||||
if err := task_info(pid, &info); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.User =
|
||||
uint64(info.ptinfo.pti_total_user) / uint64(time.Millisecond)
|
||||
|
||||
self.Sys =
|
||||
uint64(info.ptinfo.pti_total_system) / uint64(time.Millisecond)
|
||||
|
||||
self.Total = self.User + self.Sys
|
||||
|
||||
self.StartTime = (uint64(info.pbsd.pbi_start_tvsec) * 1000) +
|
||||
(uint64(info.pbsd.pbi_start_tvusec) / 1000)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcArgs) Get(pid int) error {
|
||||
var args []string
|
||||
|
||||
argv := func(arg string) {
|
||||
args = append(args, arg)
|
||||
}
|
||||
|
||||
err := kern_procargs(pid, nil, argv, nil)
|
||||
|
||||
self.List = args
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (self *ProcEnv) Get(pid int) error {
|
||||
if self.Vars == nil {
|
||||
self.Vars = map[string]string{}
|
||||
}
|
||||
|
||||
env := func(k, v string) {
|
||||
self.Vars[k] = v
|
||||
}
|
||||
|
||||
return kern_procargs(pid, nil, nil, env)
|
||||
}
|
||||
|
||||
func (self *ProcExe) Get(pid int) error {
|
||||
exe := func(arg string) {
|
||||
self.Name = arg
|
||||
}
|
||||
|
||||
return kern_procargs(pid, exe, nil, nil)
|
||||
}
|
||||
|
||||
func (self *ProcFDUsage) Get(pid int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
// wrapper around sysctl KERN_PROCARGS2
|
||||
// callbacks params are optional,
|
||||
// up to the caller as to which pieces of data they want
|
||||
func kern_procargs(pid int,
|
||||
exe func(string),
|
||||
argv func(string),
|
||||
env func(string, string)) error {
|
||||
|
||||
mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)}
|
||||
argmax := uintptr(C.ARG_MAX)
|
||||
buf := make([]byte, argmax)
|
||||
err := sysctl(mib, &buf[0], &argmax, nil, 0)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
bbuf := bytes.NewBuffer(buf)
|
||||
bbuf.Truncate(int(argmax))
|
||||
|
||||
var argc int32
|
||||
binary.Read(bbuf, binary.LittleEndian, &argc)
|
||||
|
||||
path, err := bbuf.ReadBytes(0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading the argv[0]: %v", err)
|
||||
}
|
||||
if exe != nil {
|
||||
exe(string(chop(path)))
|
||||
}
|
||||
|
||||
// skip trailing \0's
|
||||
for {
|
||||
c, err := bbuf.ReadByte()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error skipping nils: %v", err)
|
||||
}
|
||||
if c != 0 {
|
||||
bbuf.UnreadByte()
|
||||
break // start of argv[0]
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < int(argc); i++ {
|
||||
arg, err := bbuf.ReadBytes(0)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading args: %v", err)
|
||||
}
|
||||
if argv != nil {
|
||||
argv(string(chop(arg)))
|
||||
}
|
||||
}
|
||||
|
||||
if env == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
delim := []byte{61} // "="
|
||||
|
||||
for {
|
||||
line, err := bbuf.ReadBytes(0)
|
||||
if err == io.EOF || line[0] == 0 {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading args: %v", err)
|
||||
}
|
||||
pair := bytes.SplitN(chop(line), delim, 2)
|
||||
|
||||
if len(pair) != 2 {
|
||||
return fmt.Errorf("Error reading process information for PID: %d", pid)
|
||||
}
|
||||
|
||||
env(string(pair[0]), string(pair[1]))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX copied from zsyscall_darwin_amd64.go
|
||||
func sysctl(mib []C.int, old *byte, oldlen *uintptr,
|
||||
new *byte, newlen uintptr) (err error) {
|
||||
var p0 unsafe.Pointer
|
||||
p0 = unsafe.Pointer(&mib[0])
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0),
|
||||
uintptr(len(mib)),
|
||||
uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)),
|
||||
uintptr(unsafe.Pointer(new)), uintptr(newlen))
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func vm_info(vmstat *C.vm_statistics_data_t) error {
|
||||
var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT
|
||||
|
||||
status := C.host_statistics(
|
||||
C.host_t(C.mach_host_self()),
|
||||
C.HOST_VM_INFO,
|
||||
C.host_info_t(unsafe.Pointer(vmstat)),
|
||||
&count)
|
||||
|
||||
if status != C.KERN_SUCCESS {
|
||||
return fmt.Errorf("host_statistics=%d", status)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// generic Sysctl buffer unmarshalling
|
||||
func sysctlbyname(name string, data interface{}) (err error) {
|
||||
val, err := syscall.Sysctl(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := []byte(val)
|
||||
|
||||
switch v := data.(type) {
|
||||
case *uint64:
|
||||
*v = *(*uint64)(unsafe.Pointer(&buf[0]))
|
||||
return
|
||||
}
|
||||
|
||||
bbuf := bytes.NewBuffer([]byte(val))
|
||||
return binary.Read(bbuf, binary.LittleEndian, data)
|
||||
}
|
||||
|
||||
func task_info(pid int, info *C.struct_proc_taskallinfo) error {
|
||||
size := C.int(unsafe.Sizeof(*info))
|
||||
ptr := unsafe.Pointer(info)
|
||||
|
||||
n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size)
|
||||
if n != size {
|
||||
return fmt.Errorf("Could not read process info for pid %d", pid)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
126
vendor/github.com/elastic/gosigar/sigar_format.go
generated
vendored
@ -1,126 +0,0 @@
|
||||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package gosigar
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Go version of apr_strfsize
|
||||
func FormatSize(size uint64) string {
|
||||
ord := []string{"K", "M", "G", "T", "P", "E"}
|
||||
o := 0
|
||||
buf := new(bytes.Buffer)
|
||||
w := bufio.NewWriter(buf)
|
||||
|
||||
if size < 973 {
|
||||
fmt.Fprintf(w, "%3d ", size)
|
||||
w.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
for {
|
||||
remain := size & 1023
|
||||
size >>= 10
|
||||
|
||||
if size >= 973 {
|
||||
o++
|
||||
continue
|
||||
}
|
||||
|
||||
if size < 9 || (size == 9 && remain < 973) {
|
||||
remain = ((remain * 5) + 256) / 512
|
||||
if remain >= 10 {
|
||||
size++
|
||||
remain = 0
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "%d.%d%s", size, remain, ord[o])
|
||||
break
|
||||
}
|
||||
|
||||
if remain >= 512 {
|
||||
size++
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "%3d%s", size, ord[o])
|
||||
break
|
||||
}
|
||||
|
||||
w.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func FormatPercent(percent float64) string {
|
||||
return strconv.FormatFloat(percent, 'f', -1, 64) + "%"
|
||||
}
|
||||
|
||||
func (self *FileSystemUsage) UsePercent() float64 {
|
||||
b_used := (self.Total - self.Free) / 1024
|
||||
b_avail := self.Avail / 1024
|
||||
utotal := b_used + b_avail
|
||||
used := b_used
|
||||
|
||||
if utotal != 0 {
|
||||
u100 := used * 100
|
||||
pct := u100 / utotal
|
||||
if u100%utotal != 0 {
|
||||
pct += 1
|
||||
}
|
||||
return (float64(pct) / float64(100)) * 100.0
|
||||
}
|
||||
|
||||
return 0.0
|
||||
}
|
||||
|
||||
func (self *Uptime) Format() string {
|
||||
buf := new(bytes.Buffer)
|
||||
w := bufio.NewWriter(buf)
|
||||
uptime := uint64(self.Length)
|
||||
|
||||
days := uptime / (60 * 60 * 24)
|
||||
|
||||
if days != 0 {
|
||||
s := ""
|
||||
if days > 1 {
|
||||
s = "s"
|
||||
}
|
||||
fmt.Fprintf(w, "%d day%s, ", days, s)
|
||||
}
|
||||
|
||||
minutes := uptime / 60
|
||||
hours := minutes / 60
|
||||
hours %= 24
|
||||
minutes %= 60
|
||||
|
||||
fmt.Fprintf(w, "%2d:%02d", hours, minutes)
|
||||
|
||||
w.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (self *ProcTime) FormatStartTime() string {
|
||||
if self.StartTime == 0 {
|
||||
return "00:00"
|
||||
}
|
||||
start := time.Unix(int64(self.StartTime)/1000, 0)
|
||||
format := "Jan02"
|
||||
if time.Since(start).Seconds() < (60 * 60 * 24) {
|
||||
format = "15:04"
|
||||
}
|
||||
return start.Format(format)
|
||||
}
|
||||
|
||||
func (self *ProcTime) FormatTotal() string {
|
||||
t := self.Total / 1000
|
||||
ss := t % 60
|
||||
t /= 60
|
||||
mm := t % 60
|
||||
t /= 60
|
||||
hh := t % 24
|
||||
return fmt.Sprintf("%02d:%02d:%02d", hh, mm, ss)
|
||||
}
|
158
vendor/github.com/elastic/gosigar/sigar_freebsd.go
generated
vendored
@ -1,158 +0,0 @@
|
||||
// Copied and modified from sigar_linux.go.
|
||||
|
||||
package gosigar
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
/*
|
||||
#include <sys/param.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/ucred.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <unistd.h>
|
||||
#include <time.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
func init() {
|
||||
system.ticks = uint64(C.sysconf(C._SC_CLK_TCK))
|
||||
|
||||
Procd = "/compat/linux/proc"
|
||||
|
||||
getLinuxBootTime()
|
||||
}
|
||||
|
||||
func getMountTableFileName() string {
|
||||
return Procd + "/mtab"
|
||||
}
|
||||
|
||||
func (self *Uptime) Get() error {
|
||||
ts := C.struct_timespec{}
|
||||
|
||||
if _, err := C.clock_gettime(C.CLOCK_UPTIME, &ts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Length = float64(ts.tv_sec) + 1e-9*float64(ts.tv_nsec)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *FDUsage) Get() error {
|
||||
val := C.uint32_t(0)
|
||||
sc := C.size_t(4)
|
||||
|
||||
name := C.CString("kern.openfiles")
|
||||
_, err := C.sysctlbyname(name, unsafe.Pointer(&val), &sc, nil, 0)
|
||||
C.free(unsafe.Pointer(name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.Open = uint64(val)
|
||||
|
||||
name = C.CString("kern.maxfiles")
|
||||
_, err = C.sysctlbyname(name, unsafe.Pointer(&val), &sc, nil, 0)
|
||||
C.free(unsafe.Pointer(name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.Max = uint64(val)
|
||||
|
||||
self.Unused = self.Max - self.Open
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcFDUsage) Get(pid int) error {
|
||||
err := readFile("/proc/"+strconv.Itoa(pid)+"/rlimit", func(line string) bool {
|
||||
if strings.HasPrefix(line, "nofile") {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) == 3 {
|
||||
self.SoftLimit, _ = strconv.ParseUint(fields[1], 10, 64)
|
||||
self.HardLimit, _ = strconv.ParseUint(fields[2], 10, 64)
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// linprocfs only provides this information for this process (self).
|
||||
fds, err := ioutil.ReadDir(procFileName(pid, "fd"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.Open = uint64(len(fds))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *HugeTLBPages) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func parseCpuStat(self *Cpu, line string) error {
|
||||
fields := strings.Fields(line)
|
||||
|
||||
self.User, _ = strtoull(fields[1])
|
||||
self.Nice, _ = strtoull(fields[2])
|
||||
self.Sys, _ = strtoull(fields[3])
|
||||
self.Idle, _ = strtoull(fields[4])
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Mem) Get() error {
|
||||
val := C.uint32_t(0)
|
||||
sc := C.size_t(4)
|
||||
|
||||
name := C.CString("vm.stats.vm.v_page_count")
|
||||
_, err := C.sysctlbyname(name, unsafe.Pointer(&val), &sc, nil, 0)
|
||||
C.free(unsafe.Pointer(name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pagecount := uint64(val)
|
||||
|
||||
name = C.CString("vm.stats.vm.v_page_size")
|
||||
_, err = C.sysctlbyname(name, unsafe.Pointer(&val), &sc, nil, 0)
|
||||
C.free(unsafe.Pointer(name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pagesize := uint64(val)
|
||||
|
||||
name = C.CString("vm.stats.vm.v_free_count")
|
||||
_, err = C.sysctlbyname(name, unsafe.Pointer(&val), &sc, nil, 0)
|
||||
C.free(unsafe.Pointer(name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.Free = uint64(val) * pagesize
|
||||
|
||||
name = C.CString("vm.stats.vm.v_inactive_count")
|
||||
_, err = C.sysctlbyname(name, unsafe.Pointer(&val), &sc, nil, 0)
|
||||
C.free(unsafe.Pointer(name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
kern := uint64(val)
|
||||
|
||||
self.Total = uint64(pagecount * pagesize)
|
||||
|
||||
self.Used = self.Total - self.Free
|
||||
self.ActualFree = self.Free + (kern * pagesize)
|
||||
self.ActualUsed = self.Used - (kern * pagesize)
|
||||
|
||||
return nil
|
||||
}
|
207
vendor/github.com/elastic/gosigar/sigar_interface.go
generated
vendored
@ -1,207 +0,0 @@
|
||||
package gosigar
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type ErrNotImplemented struct {
|
||||
OS string
|
||||
}
|
||||
|
||||
func (e ErrNotImplemented) Error() string {
|
||||
return "not implemented on " + e.OS
|
||||
}
|
||||
|
||||
func IsNotImplemented(err error) bool {
|
||||
switch err.(type) {
|
||||
case ErrNotImplemented, *ErrNotImplemented:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
type Sigar interface {
|
||||
CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{})
|
||||
GetLoadAverage() (LoadAverage, error)
|
||||
GetMem() (Mem, error)
|
||||
GetSwap() (Swap, error)
|
||||
GetHugeTLBPages(HugeTLBPages, error)
|
||||
GetFileSystemUsage(string) (FileSystemUsage, error)
|
||||
GetFDUsage() (FDUsage, error)
|
||||
GetRusage(who int) (Rusage, error)
|
||||
}
|
||||
|
||||
type Cpu struct {
|
||||
User uint64
|
||||
Nice uint64
|
||||
Sys uint64
|
||||
Idle uint64
|
||||
Wait uint64
|
||||
Irq uint64
|
||||
SoftIrq uint64
|
||||
Stolen uint64
|
||||
}
|
||||
|
||||
func (cpu *Cpu) Total() uint64 {
|
||||
return cpu.User + cpu.Nice + cpu.Sys + cpu.Idle +
|
||||
cpu.Wait + cpu.Irq + cpu.SoftIrq + cpu.Stolen
|
||||
}
|
||||
|
||||
func (cpu Cpu) Delta(other Cpu) Cpu {
|
||||
return Cpu{
|
||||
User: cpu.User - other.User,
|
||||
Nice: cpu.Nice - other.Nice,
|
||||
Sys: cpu.Sys - other.Sys,
|
||||
Idle: cpu.Idle - other.Idle,
|
||||
Wait: cpu.Wait - other.Wait,
|
||||
Irq: cpu.Irq - other.Irq,
|
||||
SoftIrq: cpu.SoftIrq - other.SoftIrq,
|
||||
Stolen: cpu.Stolen - other.Stolen,
|
||||
}
|
||||
}
|
||||
|
||||
type LoadAverage struct {
|
||||
One, Five, Fifteen float64
|
||||
}
|
||||
|
||||
type Uptime struct {
|
||||
Length float64
|
||||
}
|
||||
|
||||
type Mem struct {
|
||||
Total uint64
|
||||
Used uint64
|
||||
Free uint64
|
||||
ActualFree uint64
|
||||
ActualUsed uint64
|
||||
}
|
||||
|
||||
type Swap struct {
|
||||
Total uint64
|
||||
Used uint64
|
||||
Free uint64
|
||||
}
|
||||
|
||||
type HugeTLBPages struct {
|
||||
Total uint64
|
||||
Free uint64
|
||||
Reserved uint64
|
||||
Surplus uint64
|
||||
DefaultSize uint64
|
||||
TotalAllocatedSize uint64
|
||||
}
|
||||
|
||||
type CpuList struct {
|
||||
List []Cpu
|
||||
}
|
||||
|
||||
type FDUsage struct {
|
||||
Open uint64
|
||||
Unused uint64
|
||||
Max uint64
|
||||
}
|
||||
|
||||
type FileSystem struct {
|
||||
DirName string
|
||||
DevName string
|
||||
TypeName string
|
||||
SysTypeName string
|
||||
Options string
|
||||
Flags uint32
|
||||
}
|
||||
|
||||
type FileSystemList struct {
|
||||
List []FileSystem
|
||||
}
|
||||
|
||||
type FileSystemUsage struct {
|
||||
Total uint64
|
||||
Used uint64
|
||||
Free uint64
|
||||
Avail uint64
|
||||
Files uint64
|
||||
FreeFiles uint64
|
||||
}
|
||||
|
||||
type ProcList struct {
|
||||
List []int
|
||||
}
|
||||
|
||||
type RunState byte
|
||||
|
||||
const (
|
||||
RunStateSleep = 'S'
|
||||
RunStateRun = 'R'
|
||||
RunStateStop = 'T'
|
||||
RunStateZombie = 'Z'
|
||||
RunStateIdle = 'D'
|
||||
RunStateUnknown = '?'
|
||||
)
|
||||
|
||||
type ProcState struct {
|
||||
Name string
|
||||
Username string
|
||||
State RunState
|
||||
Ppid int
|
||||
Pgid int
|
||||
Tty int
|
||||
Priority int
|
||||
Nice int
|
||||
Processor int
|
||||
}
|
||||
|
||||
type ProcMem struct {
|
||||
Size uint64
|
||||
Resident uint64
|
||||
Share uint64
|
||||
MinorFaults uint64
|
||||
MajorFaults uint64
|
||||
PageFaults uint64
|
||||
}
|
||||
|
||||
type ProcTime struct {
|
||||
StartTime uint64
|
||||
User uint64
|
||||
Sys uint64
|
||||
Total uint64
|
||||
}
|
||||
|
||||
type ProcArgs struct {
|
||||
List []string
|
||||
}
|
||||
|
||||
type ProcEnv struct {
|
||||
Vars map[string]string
|
||||
}
|
||||
|
||||
type ProcExe struct {
|
||||
Name string
|
||||
Cwd string
|
||||
Root string
|
||||
}
|
||||
|
||||
type ProcFDUsage struct {
|
||||
Open uint64
|
||||
SoftLimit uint64
|
||||
HardLimit uint64
|
||||
}
|
||||
|
||||
type Rusage struct {
|
||||
Utime time.Duration
|
||||
Stime time.Duration
|
||||
Maxrss int64
|
||||
Ixrss int64
|
||||
Idrss int64
|
||||
Isrss int64
|
||||
Minflt int64
|
||||
Majflt int64
|
||||
Nswap int64
|
||||
Inblock int64
|
||||
Oublock int64
|
||||
Msgsnd int64
|
||||
Msgrcv int64
|
||||
Nsignals int64
|
||||
Nvcsw int64
|
||||
Nivcsw int64
|
||||
}
|
133
vendor/github.com/elastic/gosigar/sigar_linux.go
generated
vendored
@ -1,133 +0,0 @@
|
||||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package gosigar
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func init() {
|
||||
system.ticks = 100 // C.sysconf(C._SC_CLK_TCK)
|
||||
|
||||
Procd = "/proc"
|
||||
|
||||
getLinuxBootTime()
|
||||
}
|
||||
|
||||
func getMountTableFileName() string {
|
||||
return "/etc/mtab"
|
||||
}
|
||||
|
||||
func (self *Uptime) Get() error {
|
||||
sysinfo := syscall.Sysinfo_t{}
|
||||
|
||||
if err := syscall.Sysinfo(&sysinfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Length = float64(sysinfo.Uptime)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *FDUsage) Get() error {
|
||||
return readFile(Procd+"/sys/fs/file-nr", func(line string) bool {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) == 3 {
|
||||
self.Open, _ = strconv.ParseUint(fields[0], 10, 64)
|
||||
self.Unused, _ = strconv.ParseUint(fields[1], 10, 64)
|
||||
self.Max, _ = strconv.ParseUint(fields[2], 10, 64)
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
func (self *HugeTLBPages) Get() error {
|
||||
table, err := parseMeminfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Total, _ = table["HugePages_Total"]
|
||||
self.Free, _ = table["HugePages_Free"]
|
||||
self.Reserved, _ = table["HugePages_Rsvd"]
|
||||
self.Surplus, _ = table["HugePages_Surp"]
|
||||
self.DefaultSize, _ = table["Hugepagesize"]
|
||||
|
||||
if totalSize, found := table["Hugetlb"]; found {
|
||||
self.TotalAllocatedSize = totalSize
|
||||
} else {
|
||||
// If Hugetlb is not present, or huge pages of different sizes
|
||||
// are used, this figure can be unaccurate.
|
||||
// TODO (jsoriano): Extract information from /sys/kernel/mm/hugepages too
|
||||
self.TotalAllocatedSize = (self.Total - self.Free + self.Reserved) * self.DefaultSize
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcFDUsage) Get(pid int) error {
|
||||
err := readFile(procFileName(pid, "limits"), func(line string) bool {
|
||||
if strings.HasPrefix(line, "Max open files") {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) == 6 {
|
||||
self.SoftLimit, _ = strconv.ParseUint(fields[3], 10, 64)
|
||||
self.HardLimit, _ = strconv.ParseUint(fields[4], 10, 64)
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fds, err := ioutil.ReadDir(procFileName(pid, "fd"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.Open = uint64(len(fds))
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseCpuStat(self *Cpu, line string) error {
|
||||
fields := strings.Fields(line)
|
||||
|
||||
self.User, _ = strtoull(fields[1])
|
||||
self.Nice, _ = strtoull(fields[2])
|
||||
self.Sys, _ = strtoull(fields[3])
|
||||
self.Idle, _ = strtoull(fields[4])
|
||||
self.Wait, _ = strtoull(fields[5])
|
||||
self.Irq, _ = strtoull(fields[6])
|
||||
self.SoftIrq, _ = strtoull(fields[7])
|
||||
self.Stolen, _ = strtoull(fields[8])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Mem) Get() error {
|
||||
|
||||
table, err := parseMeminfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Total, _ = table["MemTotal"]
|
||||
self.Free, _ = table["MemFree"]
|
||||
buffers, _ := table["Buffers"]
|
||||
cached, _ := table["Cached"]
|
||||
|
||||
if available, ok := table["MemAvailable"]; ok {
|
||||
// MemAvailable is in /proc/meminfo (kernel 3.14+)
|
||||
self.ActualFree = available
|
||||
} else {
|
||||
self.ActualFree = self.Free + buffers + cached
|
||||
}
|
||||
|
||||
self.Used = self.Total - self.Free
|
||||
self.ActualUsed = self.Total - self.ActualFree
|
||||
|
||||
return nil
|
||||
}
|
457
vendor/github.com/elastic/gosigar/sigar_linux_common.go
generated
vendored
@ -1,457 +0,0 @@
|
||||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
// +build freebsd linux
|
||||
|
||||
package gosigar
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var system struct {
|
||||
ticks uint64
|
||||
btime uint64
|
||||
}
|
||||
|
||||
var Procd string
|
||||
|
||||
func getLinuxBootTime() {
|
||||
// grab system boot time
|
||||
readFile(Procd+"/stat", func(line string) bool {
|
||||
if strings.HasPrefix(line, "btime") {
|
||||
system.btime, _ = strtoull(line[6:])
|
||||
return false // stop reading
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (self *LoadAverage) Get() error {
|
||||
line, err := ioutil.ReadFile(Procd + "/loadavg")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
fields := strings.Fields(string(line))
|
||||
|
||||
self.One, _ = strconv.ParseFloat(fields[0], 64)
|
||||
self.Five, _ = strconv.ParseFloat(fields[1], 64)
|
||||
self.Fifteen, _ = strconv.ParseFloat(fields[2], 64)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Swap) Get() error {
|
||||
|
||||
table, err := parseMeminfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.Total, _ = table["SwapTotal"]
|
||||
self.Free, _ = table["SwapFree"]
|
||||
|
||||
self.Used = self.Total - self.Free
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Cpu) Get() error {
|
||||
return readFile(Procd+"/stat", func(line string) bool {
|
||||
if len(line) > 4 && line[0:4] == "cpu " {
|
||||
parseCpuStat(self, line)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func (self *CpuList) Get() error {
|
||||
capacity := len(self.List)
|
||||
if capacity == 0 {
|
||||
capacity = 4
|
||||
}
|
||||
list := make([]Cpu, 0, capacity)
|
||||
|
||||
err := readFile(Procd+"/stat", func(line string) bool {
|
||||
if len(line) > 3 && line[0:3] == "cpu" && line[3] != ' ' {
|
||||
cpu := Cpu{}
|
||||
parseCpuStat(&cpu, line)
|
||||
list = append(list, cpu)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
self.List = list
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (self *FileSystemList) Get() error {
|
||||
capacity := len(self.List)
|
||||
if capacity == 0 {
|
||||
capacity = 10
|
||||
}
|
||||
fslist := make([]FileSystem, 0, capacity)
|
||||
|
||||
err := readFile(getMountTableFileName(), func(line string) bool {
|
||||
fields := strings.Fields(line)
|
||||
|
||||
fs := FileSystem{}
|
||||
fs.DevName = fields[0]
|
||||
fs.DirName = fields[1]
|
||||
fs.SysTypeName = fields[2]
|
||||
fs.Options = fields[3]
|
||||
|
||||
fslist = append(fslist, fs)
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
self.List = fslist
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (self *ProcList) Get() error {
|
||||
dir, err := os.Open(Procd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dir.Close()
|
||||
|
||||
const readAllDirnames = -1 // see os.File.Readdirnames doc
|
||||
|
||||
names, err := dir.Readdirnames(readAllDirnames)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
capacity := len(names)
|
||||
list := make([]int, 0, capacity)
|
||||
|
||||
for _, name := range names {
|
||||
if name[0] < '0' || name[0] > '9' {
|
||||
continue
|
||||
}
|
||||
pid, err := strconv.Atoi(name)
|
||||
if err == nil {
|
||||
list = append(list, pid)
|
||||
}
|
||||
}
|
||||
|
||||
self.List = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcState) Get(pid int) error {
|
||||
data, err := readProcFile(pid, "stat")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Extract the comm value with is surrounded by parentheses.
|
||||
lIdx := bytes.Index(data, []byte("("))
|
||||
rIdx := bytes.LastIndex(data, []byte(")"))
|
||||
if lIdx < 0 || rIdx < 0 || lIdx >= rIdx || rIdx+2 >= len(data) {
|
||||
return fmt.Errorf("failed to extract comm for pid %d from '%v'", pid, string(data))
|
||||
}
|
||||
self.Name = string(data[lIdx+1 : rIdx])
|
||||
|
||||
// Extract the rest of the fields that we are interested in.
|
||||
fields := bytes.Fields(data[rIdx+2:])
|
||||
if len(fields) <= 36 {
|
||||
return fmt.Errorf("expected more stat fields for pid %d from '%v'", pid, string(data))
|
||||
}
|
||||
|
||||
interests := bytes.Join([][]byte{
|
||||
fields[0], // state
|
||||
fields[1], // ppid
|
||||
fields[2], // pgrp
|
||||
fields[4], // tty_nr
|
||||
fields[15], // priority
|
||||
fields[16], // nice
|
||||
fields[36], // processor (last processor executed on)
|
||||
}, []byte(" "))
|
||||
|
||||
var state string
|
||||
_, err = fmt.Fscan(bytes.NewBuffer(interests),
|
||||
&state,
|
||||
&self.Ppid,
|
||||
&self.Pgid,
|
||||
&self.Tty,
|
||||
&self.Priority,
|
||||
&self.Nice,
|
||||
&self.Processor,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse stat fields for pid %d from '%v': %v", pid, string(data), err)
|
||||
}
|
||||
self.State = RunState(state[0])
|
||||
|
||||
// Read /proc/[pid]/status to get the uid, then lookup uid to get username.
|
||||
status, err := getProcStatus(pid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read process status for pid %d: %v", pid, err)
|
||||
}
|
||||
uids, err := getUIDs(status)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read process status for pid %d: %v", pid, err)
|
||||
}
|
||||
user, err := user.LookupId(uids[0])
|
||||
if err == nil {
|
||||
self.Username = user.Username
|
||||
} else {
|
||||
self.Username = uids[0]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcMem) Get(pid int) error {
|
||||
contents, err := readProcFile(pid, "statm")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fields := strings.Fields(string(contents))
|
||||
|
||||
size, _ := strtoull(fields[0])
|
||||
self.Size = size << 12
|
||||
|
||||
rss, _ := strtoull(fields[1])
|
||||
self.Resident = rss << 12
|
||||
|
||||
share, _ := strtoull(fields[2])
|
||||
self.Share = share << 12
|
||||
|
||||
contents, err = readProcFile(pid, "stat")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fields = strings.Fields(string(contents))
|
||||
|
||||
self.MinorFaults, _ = strtoull(fields[10])
|
||||
self.MajorFaults, _ = strtoull(fields[12])
|
||||
self.PageFaults = self.MinorFaults + self.MajorFaults
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcTime) Get(pid int) error {
|
||||
contents, err := readProcFile(pid, "stat")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fields := strings.Fields(string(contents))
|
||||
|
||||
user, _ := strtoull(fields[13])
|
||||
sys, _ := strtoull(fields[14])
|
||||
// convert to millis
|
||||
self.User = user * (1000 / system.ticks)
|
||||
self.Sys = sys * (1000 / system.ticks)
|
||||
self.Total = self.User + self.Sys
|
||||
|
||||
// convert to millis
|
||||
self.StartTime, _ = strtoull(fields[21])
|
||||
self.StartTime /= system.ticks
|
||||
self.StartTime += system.btime
|
||||
self.StartTime *= 1000
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcArgs) Get(pid int) error {
|
||||
contents, err := readProcFile(pid, "cmdline")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bbuf := bytes.NewBuffer(contents)
|
||||
|
||||
var args []string
|
||||
|
||||
for {
|
||||
arg, err := bbuf.ReadBytes(0)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
args = append(args, string(chop(arg)))
|
||||
}
|
||||
|
||||
self.List = args
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcEnv) Get(pid int) error {
|
||||
contents, err := readProcFile(pid, "environ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if self.Vars == nil {
|
||||
self.Vars = map[string]string{}
|
||||
}
|
||||
|
||||
pairs := bytes.Split(contents, []byte{0})
|
||||
for _, kv := range pairs {
|
||||
parts := bytes.SplitN(kv, []byte{'='}, 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := string(bytes.TrimSpace(parts[0]))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
self.Vars[key] = string(bytes.TrimSpace(parts[1]))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
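Both /proc/[pid]/cmdline and /proc/[pid]/environ, read by the two functions above, are NUL-separated byte streams. A compact, runnable equivalent of the two parsing loops (the sample contents are invented):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Hypothetical contents of /proc/[pid]/cmdline and /proc/[pid]/environ.
	cmdline := []byte("ls\x00-l\x00/tmp\x00")
	environ := []byte("HOME=/root\x00PATH=/usr/bin\x00")

	// Arguments: one entry per NUL-terminated chunk.
	var args []string
	for _, a := range bytes.Split(cmdline, []byte{0}) {
		if len(a) > 0 {
			args = append(args, string(a))
		}
	}

	// Environment: each chunk is KEY=VALUE.
	env := map[string]string{}
	for _, kv := range bytes.Split(environ, []byte{0}) {
		if parts := bytes.SplitN(kv, []byte{'='}, 2); len(parts) == 2 {
			env[string(parts[0])] = string(parts[1])
		}
	}

	fmt.Println(args) // [ls -l /tmp]
	fmt.Println(env)  // map[HOME:/root PATH:/usr/bin]
}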
|
||||
func (self *ProcExe) Get(pid int) error {
|
||||
fields := map[string]*string{
|
||||
"exe": &self.Name,
|
||||
"cwd": &self.Cwd,
|
||||
"root": &self.Root,
|
||||
}
|
||||
|
||||
for name, field := range fields {
|
||||
val, err := os.Readlink(procFileName(pid, name))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*field = val
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseMeminfo() (map[string]uint64, error) {
|
||||
table := map[string]uint64{}
|
||||
|
||||
err := readFile(Procd+"/meminfo", func(line string) bool {
|
||||
fields := strings.Split(line, ":")
|
||||
|
||||
if len(fields) != 2 {
|
||||
return true // skip on errors
|
||||
}
|
||||
|
||||
valueUnit := strings.Fields(fields[1])
|
||||
value, err := strtoull(valueUnit[0])
|
||||
if err != nil {
|
||||
return true // skip on errors
|
||||
}
|
||||
|
||||
if len(valueUnit) > 1 && valueUnit[1] == "kB" {
|
||||
value *= 1024
|
||||
}
|
||||
table[fields[0]] = value
|
||||
|
||||
return true
|
||||
})
|
||||
return table, err
|
||||
}
|
||||
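parseMeminfo above treats each /proc/meminfo line as "Key: value [kB]" and scales kB entries to bytes. The same logic applied to a couple of made-up lines:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	sample := []string{"MemTotal:       16384000 kB", "HugePages_Total:       0"}

	table := map[string]uint64{}
	for _, line := range sample {
		fields := strings.Split(line, ":")
		if len(fields) != 2 {
			continue // skip malformed lines, as above
		}
		valueUnit := strings.Fields(fields[1])
		value, err := strconv.ParseUint(valueUnit[0], 10, 64)
		if err != nil {
			continue
		}
		if len(valueUnit) > 1 && valueUnit[1] == "kB" {
			value *= 1024 // scale kilobytes to bytes
		}
		table[fields[0]] = value
	}

	fmt.Println(table["MemTotal"]) // 16777216000
}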
|
||||
func readFile(file string, handler func(string) bool) error {
|
||||
contents, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(bytes.NewBuffer(contents))
|
||||
|
||||
for {
|
||||
line, _, err := reader.ReadLine()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if !handler(string(line)) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func strtoull(val string) (uint64, error) {
|
||||
return strconv.ParseUint(val, 10, 64)
|
||||
}
|
||||
|
||||
func procFileName(pid int, name string) string {
|
||||
return Procd + "/" + strconv.Itoa(pid) + "/" + name
|
||||
}
|
||||
|
||||
func readProcFile(pid int, name string) (content []byte, err error) {
|
||||
path := procFileName(pid, name)
|
||||
|
||||
// Panics have been reported when reading proc files, let's recover and
|
||||
// report the path if this happens
|
||||
// See https://github.com/elastic/beats/issues/6692
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
content = nil
|
||||
err = fmt.Errorf("recovered panic when reading proc file '%s': %v", path, r)
|
||||
}
|
||||
}()
|
||||
contents, err := ioutil.ReadFile(path)
|
||||
|
||||
if err != nil {
|
||||
if perr, ok := err.(*os.PathError); ok {
|
||||
if perr.Err == syscall.ENOENT {
|
||||
return nil, syscall.ESRCH
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return contents, err
|
||||
}
|
||||
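readProcFile can only turn a panic into an error because its results are named return values that the deferred closure overwrites. The pattern in isolation, stripped of anything proc-specific:

package main

import "fmt"

// mustNotPanic converts a panic raised by f into an ordinary error by writing
// to the named return value from a deferred function.
func mustNotPanic(f func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("recovered panic: %v", r)
		}
	}()
	f()
	return nil
}

func main() {
	fmt.Println(mustNotPanic(func() { panic("boom") }))
}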
|
||||
// getProcStatus reads /proc/[pid]/status which contains process status
|
||||
// information in human readable form.
|
||||
func getProcStatus(pid int) (map[string]string, error) {
|
||||
status := make(map[string]string, 42)
|
||||
path := filepath.Join(Procd, strconv.Itoa(pid), "status")
|
||||
err := readFile(path, func(line string) bool {
|
||||
fields := strings.SplitN(line, ":", 2)
|
||||
if len(fields) == 2 {
|
||||
status[fields[0]] = strings.TrimSpace(fields[1])
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
return status, err
|
||||
}
|
||||
|
||||
// getUIDs reads the "Uid" value from status and splits it into four values --
|
||||
// real, effective, saved set, and file system UIDs.
|
||||
func getUIDs(status map[string]string) ([]string, error) {
|
||||
uidLine, ok := status["Uid"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Uid not found in proc status")
|
||||
}
|
||||
|
||||
uidStrs := strings.Fields(uidLine)
|
||||
if len(uidStrs) != 4 {
|
||||
return nil, fmt.Errorf("Uid line ('%s') did not contain four values", uidLine)
|
||||
}
|
||||
|
||||
return uidStrs, nil
|
||||
}
|
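Taken together, getProcStatus turns the colon-separated lines of /proc/[pid]/status into a map and getUIDs pulls the four whitespace-separated IDs out of its "Uid" entry. A self-contained sketch with an invented status excerpt:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical excerpt of /proc/[pid]/status.
	sample := "Name:\tbash\nUid:\t1000\t1000\t1000\t1000\nGid:\t1000\t1000\t1000\t1000\n"

	// Build the key/value map, as getProcStatus does above.
	status := map[string]string{}
	for _, line := range strings.Split(sample, "\n") {
		fields := strings.SplitN(line, ":", 2)
		if len(fields) == 2 {
			status[fields[0]] = strings.TrimSpace(fields[1])
		}
	}

	// Real, effective, saved set, and file system UIDs, as in getUIDs above.
	uids := strings.Fields(status["Uid"])
	fmt.Println(uids) // [1000 1000 1000 1000]
}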
426
vendor/github.com/elastic/gosigar/sigar_openbsd.go
generated
vendored
@ -1,426 +0,0 @@
|
||||
// Copyright (c) 2016 Jasper Lievisse Adriaanse <j@jasper.la>.
|
||||
|
||||
// +build openbsd
|
||||
|
||||
package gosigar
|
||||
|
||||
/*
|
||||
#include <sys/param.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/sched.h>
|
||||
#include <sys/swap.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
//import "github.com/davecgh/go-spew/spew"
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type Uvmexp struct {
|
||||
pagesize uint32
|
||||
pagemask uint32
|
||||
pageshift uint32
|
||||
npages uint32
|
||||
free uint32
|
||||
active uint32
|
||||
inactive uint32
|
||||
paging uint32
|
||||
wired uint32
|
||||
zeropages uint32
|
||||
reserve_pagedaemon uint32
|
||||
reserve_kernel uint32
|
||||
anonpages uint32
|
||||
vnodepages uint32
|
||||
vtextpages uint32
|
||||
freemin uint32
|
||||
freetarg uint32
|
||||
inactarg uint32
|
||||
wiredmax uint32
|
||||
anonmin uint32
|
||||
vtextmin uint32
|
||||
vnodemin uint32
|
||||
anonminpct uint32
|
||||
vtextmi uint32
|
||||
npct uint32
|
||||
vnodeminpct uint32
|
||||
nswapdev uint32
|
||||
swpages uint32
|
||||
swpginuse uint32
|
||||
swpgonly uint32
|
||||
nswget uint32
|
||||
nanon uint32
|
||||
nanonneeded uint32
|
||||
nfreeanon uint32
|
||||
faults uint32
|
||||
traps uint32
|
||||
intrs uint32
|
||||
swtch uint32
|
||||
softs uint32
|
||||
syscalls uint32
|
||||
pageins uint32
|
||||
obsolete_swapins uint32
|
||||
obsolete_swapouts uint32
|
||||
pgswapin uint32
|
||||
pgswapout uint32
|
||||
forks uint32
|
||||
forks_ppwait uint32
|
||||
forks_sharevm uint32
|
||||
pga_zerohit uint32
|
||||
pga_zeromiss uint32
|
||||
zeroaborts uint32
|
||||
fltnoram uint32
|
||||
fltnoanon uint32
|
||||
fltpgwait uint32
|
||||
fltpgrele uint32
|
||||
fltrelck uint32
|
||||
fltrelckok uint32
|
||||
fltanget uint32
|
||||
fltanretry uint32
|
||||
fltamcopy uint32
|
||||
fltnamap uint32
|
||||
fltnomap uint32
|
||||
fltlget uint32
|
||||
fltget uint32
|
||||
flt_anon uint32
|
||||
flt_acow uint32
|
||||
flt_obj uint32
|
||||
flt_prcopy uint32
|
||||
flt_przero uint32
|
||||
pdwoke uint32
|
||||
pdrevs uint32
|
||||
pdswout uint32
|
||||
pdfreed uint32
|
||||
pdscans uint32
|
||||
pdanscan uint32
|
||||
pdobscan uint32
|
||||
pdreact uint32
|
||||
pdbusy uint32
|
||||
pdpageouts uint32
|
||||
pdpending uint32
|
||||
pddeact uint32
|
||||
pdreanon uint32
|
||||
pdrevnode uint32
|
||||
pdrevtext uint32
|
||||
fpswtch uint32
|
||||
kmapent uint32
|
||||
}
|
||||
|
||||
type Bcachestats struct {
|
||||
numbufs uint64
|
||||
numbufpages uint64
|
||||
numdirtypages uint64
|
||||
numcleanpages uint64
|
||||
pendingwrites uint64
|
||||
pendingreads uint64
|
||||
numwrites uint64
|
||||
numreads uint64
|
||||
cachehits uint64
|
||||
busymapped uint64
|
||||
dmapages uint64
|
||||
highpages uint64
|
||||
delwribufs uint64
|
||||
kvaslots uint64
|
||||
kvaslots_avail uint64
|
||||
}
|
||||
|
||||
type Swapent struct {
|
||||
se_dev C.dev_t
|
||||
se_flags int32
|
||||
se_nblks int32
|
||||
se_inuse int32
|
||||
se_priority int32
|
||||
sw_path []byte
|
||||
}
|
||||
|
||||
func (self *FileSystemList) Get() error {
|
||||
num, err := syscall.Getfsstat(nil, C.MNT_NOWAIT)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := make([]syscall.Statfs_t, num)
|
||||
|
||||
_, err = syscall.Getfsstat(buf, C.MNT_NOWAIT)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fslist := make([]FileSystem, 0, num)
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
fs := FileSystem{}
|
||||
|
||||
fs.DirName = bytePtrToString(&buf[i].F_mntonname[0])
|
||||
fs.DevName = bytePtrToString(&buf[i].F_mntfromname[0])
|
||||
fs.SysTypeName = bytePtrToString(&buf[i].F_fstypename[0])
|
||||
|
||||
fslist = append(fslist, fs)
|
||||
}
|
||||
|
||||
self.List = fslist
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (self *FileSystemUsage) Get(path string) error {
|
||||
stat := syscall.Statfs_t{}
|
||||
err := syscall.Statfs(path, &stat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Total = uint64(stat.F_blocks) * uint64(stat.F_bsize)
|
||||
self.Free = uint64(stat.F_bfree) * uint64(stat.F_bsize)
|
||||
self.Avail = uint64(stat.F_bavail) * uint64(stat.F_bsize)
|
||||
self.Used = self.Total - self.Free
|
||||
self.Files = stat.F_files
|
||||
self.FreeFiles = stat.F_ffree
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *FDUsage) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *LoadAverage) Get() error {
|
||||
avg := []C.double{0, 0, 0}
|
||||
|
||||
C.getloadavg(&avg[0], C.int(len(avg)))
|
||||
|
||||
self.One = float64(avg[0])
|
||||
self.Five = float64(avg[1])
|
||||
self.Fifteen = float64(avg[2])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Uptime) Get() error {
|
||||
tv := syscall.Timeval{}
|
||||
mib := [2]int32{C.CTL_KERN, C.KERN_BOOTTIME}
|
||||
|
||||
n := uintptr(0)
|
||||
// First we determine how much memory we'll need to pass later on (via `n`)
|
||||
_, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Now perform the actual sysctl(3) call, storing the result in tv
|
||||
_, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, uintptr(unsafe.Pointer(&tv)), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds()
|
||||
|
||||
return nil
|
||||
}
|
||||
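Every sysctl read in this OpenBSD file follows the same two calls: one with a nil destination to learn the required size, then one that copies the value out. Folded into a single hypothetical helper (sysctlInto is not part of the deleted file; it relies on the syscall and unsafe imports already present above and only builds on OpenBSD):

// sysctlInto reports whether the value identified by mib was copied into the
// memory that out points at; out is assumed to be large enough to hold it.
func sysctlInto(mib []int32, out unsafe.Pointer) bool {
	n := uintptr(0)
	// Size query: a nil destination fills n with the value's length.
	_, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL,
		uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)),
		0, uintptr(unsafe.Pointer(&n)), 0, 0)
	if errno != 0 || n == 0 {
		return false
	}
	// Second call: copy the value into the caller-provided destination.
	_, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL,
		uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)),
		uintptr(out), uintptr(unsafe.Pointer(&n)), 0, 0)
	return errno == 0 && n != 0
}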
|
||||
func (self *Mem) Get() error {
|
||||
n := uintptr(0)
|
||||
|
||||
var uvmexp Uvmexp
|
||||
mib := [2]int32{C.CTL_VM, C.VM_UVMEXP}
|
||||
n = uintptr(0)
|
||||
// First we determine how much memory we'll need to pass later on (via `n`)
|
||||
_, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, uintptr(unsafe.Pointer(&uvmexp)), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var bcachestats Bcachestats
|
||||
mib3 := [3]int32{C.CTL_VFS, C.VFS_GENERIC, C.VFS_BCACHESTAT}
|
||||
n = uintptr(0)
|
||||
_, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib3[0])), 3, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
_, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib3[0])), 3, uintptr(unsafe.Pointer(&bcachestats)), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
self.Total = uint64(uvmexp.npages) << uvmexp.pageshift
|
||||
self.Used = uint64(uvmexp.npages-uvmexp.free) << uvmexp.pageshift
|
||||
self.Free = uint64(uvmexp.free) << uvmexp.pageshift
|
||||
|
||||
self.ActualFree = self.Free + (uint64(bcachestats.numbufpages) << uvmexp.pageshift)
|
||||
self.ActualUsed = self.Used - (uint64(bcachestats.numbufpages) << uvmexp.pageshift)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Swap) Get() error {
|
||||
nswap := C.swapctl(C.SWAP_NSWAP, unsafe.Pointer(uintptr(0)), 0)
|
||||
|
||||
// If there are no swap devices, nothing to do here.
|
||||
if nswap == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
swdev := make([]Swapent, nswap)
|
||||
|
||||
rnswap := C.swapctl(C.SWAP_STATS, unsafe.Pointer(&swdev[0]), nswap)
|
||||
if rnswap == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < int(nswap); i++ {
|
||||
if swdev[i].se_flags&C.SWF_ENABLE == 2 {
|
||||
self.Used = self.Used + uint64(swdev[i].se_inuse/(1024/C.DEV_BSIZE))
|
||||
self.Total = self.Total + uint64(swdev[i].se_nblks/(1024/C.DEV_BSIZE))
|
||||
}
|
||||
}
|
||||
|
||||
self.Free = self.Total - self.Used
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *HugeTLBPages) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *Cpu) Get() error {
|
||||
load := [C.CPUSTATES]C.long{C.CP_USER, C.CP_NICE, C.CP_SYS, C.CP_INTR, C.CP_IDLE}
|
||||
|
||||
mib := [2]int32{C.CTL_KERN, C.KERN_CPTIME}
|
||||
n := uintptr(0)
|
||||
// First we determine how much memory we'll need to pass later on (via `n`)
|
||||
_, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, uintptr(unsafe.Pointer(&load)), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
self.User = uint64(load[0])
|
||||
self.Nice = uint64(load[1])
|
||||
self.Sys = uint64(load[2])
|
||||
self.Irq = uint64(load[3])
|
||||
self.Idle = uint64(load[4])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *CpuList) Get() error {
|
||||
mib := [2]int32{C.CTL_HW, C.HW_NCPU}
|
||||
var ncpu int
|
||||
|
||||
n := uintptr(0)
|
||||
// First we determine how much memory we'll need to pass later on (via `n`)
|
||||
_, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Now perform the actual sysctl(3) call, storing the result in ncpu
|
||||
_, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, uintptr(unsafe.Pointer(&ncpu)), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
load := [C.CPUSTATES]C.long{C.CP_USER, C.CP_NICE, C.CP_SYS, C.CP_INTR, C.CP_IDLE}
|
||||
|
||||
self.List = make([]Cpu, ncpu)
|
||||
for curcpu := range self.List {
|
||||
sysctlCptime(ncpu, curcpu, &load)
|
||||
fillCpu(&self.List[curcpu], load)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcList) Get() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcArgs) Get(pid int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcEnv) Get(pid int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *ProcState) Get(pid int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcMem) Get(pid int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcTime) Get(pid int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *ProcExe) Get(pid int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcFDUsage) Get(pid int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *Rusage) Get(pid int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func fillCpu(cpu *Cpu, load [C.CPUSTATES]C.long) {
|
||||
cpu.User = uint64(load[0])
|
||||
cpu.Nice = uint64(load[1])
|
||||
cpu.Sys = uint64(load[2])
|
||||
cpu.Irq = uint64(load[3])
|
||||
cpu.Idle = uint64(load[4])
|
||||
}
|
||||
|
||||
func sysctlCptime(ncpu int, curcpu int, load *[C.CPUSTATES]C.long) error {
|
||||
var mib []int32
|
||||
|
||||
// Use the correct mib based on the number of CPUs and fill out the
|
||||
// current CPU number in case of SMP. (0 indexed cf. self.List)
|
||||
if ncpu == 0 {
|
||||
mib = []int32{C.CTL_KERN, C.KERN_CPTIME}
|
||||
} else {
|
||||
mib = []int32{C.CTL_KERN, C.KERN_CPTIME2, int32(curcpu)}
|
||||
}
|
||||
|
||||
len := len(mib)
|
||||
|
||||
n := uintptr(0)
|
||||
// First we determine how much memory we'll need to pass later on (via `n`)
|
||||
_, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), uintptr(len), 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), uintptr(len), uintptr(unsafe.Pointer(load)), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errno != 0 || n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
75
vendor/github.com/elastic/gosigar/sigar_stub.go
generated
vendored
@ -1,75 +0,0 @@
|
||||
// +build !darwin,!freebsd,!linux,!openbsd,!windows
|
||||
|
||||
package gosigar
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func (c *Cpu) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (l *LoadAverage) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (m *Mem) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (s *Swap) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (s *HugeTLBPages) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (f *FDUsage) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (p *ProcTime) Get(int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *FileSystemUsage) Get(path string) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *CpuList) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (p *ProcState) Get(int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (p *ProcExe) Get(int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (p *ProcMem) Get(int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (p *ProcFDUsage) Get(int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (p *ProcEnv) Get(int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (p *ProcList) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (p *ProcArgs) Get(int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *Rusage) Get(int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
69
vendor/github.com/elastic/gosigar/sigar_unix.go
generated
vendored
@ -1,69 +0,0 @@
|
||||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
// +build darwin freebsd linux
|
||||
|
||||
package gosigar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func (self *FileSystemUsage) Get(path string) error {
|
||||
stat := syscall.Statfs_t{}
|
||||
err := syscall.Statfs(path, &stat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Total = uint64(stat.Blocks) * uint64(stat.Bsize)
|
||||
self.Free = uint64(stat.Bfree) * uint64(stat.Bsize)
|
||||
self.Avail = uint64(stat.Bavail) * uint64(stat.Bsize)
|
||||
self.Used = self.Total - self.Free
|
||||
self.Files = stat.Files
|
||||
self.FreeFiles = uint64(stat.Ffree)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Rusage) Get(who int) error {
|
||||
ru, err := getResourceUsage(who)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uTime := convertRtimeToDur(ru.Utime)
|
||||
sTime := convertRtimeToDur(ru.Stime)
|
||||
|
||||
r.Utime = uTime
|
||||
r.Stime = sTime
|
||||
r.Maxrss = int64(ru.Maxrss)
|
||||
r.Ixrss = int64(ru.Ixrss)
|
||||
r.Idrss = int64(ru.Idrss)
|
||||
r.Isrss = int64(ru.Isrss)
|
||||
r.Minflt = int64(ru.Minflt)
|
||||
r.Majflt = int64(ru.Majflt)
|
||||
r.Nswap = int64(ru.Nswap)
|
||||
r.Inblock = int64(ru.Inblock)
|
||||
r.Oublock = int64(ru.Oublock)
|
||||
r.Msgsnd = int64(ru.Msgsnd)
|
||||
r.Msgrcv = int64(ru.Msgrcv)
|
||||
r.Nsignals = int64(ru.Nsignals)
|
||||
r.Nvcsw = int64(ru.Nvcsw)
|
||||
r.Nivcsw = int64(ru.Nivcsw)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getResourceUsage(who int) (unix.Rusage, error) {
|
||||
r := unix.Rusage{}
|
||||
err := unix.Getrusage(who, &r)
|
||||
|
||||
return r, err
|
||||
}
|
||||
|
||||
func convertRtimeToDur(t unix.Timeval) time.Duration {
|
||||
return time.Duration(t.Nano())
|
||||
}
|
22
vendor/github.com/elastic/gosigar/sigar_util.go
generated
vendored
@ -1,22 +0,0 @@
|
||||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package gosigar
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func bytePtrToString(ptr *int8) string {
|
||||
bytes := (*[10000]byte)(unsafe.Pointer(ptr))
|
||||
|
||||
n := 0
|
||||
for bytes[n] != 0 {
|
||||
n++
|
||||
}
|
||||
|
||||
return string(bytes[0:n])
|
||||
}
|
||||
|
||||
func chop(buf []byte) []byte {
|
||||
return buf[0 : len(buf)-1]
|
||||
}
|
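bytePtrToString above scans for the terminating NUL through a fixed 10000-byte window, the Go equivalent of strlen plus a copy. A bounds-limited sketch of the same idea using unsafe.Slice (Go 1.17+; the input below is invented):

package main

import (
	"fmt"
	"unsafe"
)

// cStringToGo copies a NUL-terminated byte sequence of at most max bytes into
// a Go string, assuming a terminating NUL exists within that window.
func cStringToGo(ptr *byte, max int) string {
	b := unsafe.Slice(ptr, max)
	n := 0
	for n < max && b[n] != 0 {
		n++
	}
	return string(b[:n])
}

func main() {
	raw := []byte{'/', 't', 'm', 'p', 0, 'x'}
	fmt.Println(cStringToGo(&raw[0], len(raw))) // /tmp
}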
409
vendor/github.com/elastic/gosigar/sigar_windows.go
generated
vendored
@ -1,409 +0,0 @@
|
||||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package gosigar
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/elastic/gosigar/sys/windows"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// version is Windows version of the host OS.
|
||||
version = windows.GetWindowsVersion()
|
||||
|
||||
// processQueryLimitedInfoAccess is set to PROCESS_QUERY_INFORMATION for Windows
|
||||
// 2003 and XP where PROCESS_QUERY_LIMITED_INFORMATION is unknown. For all newer
|
||||
// OS versions it is set to PROCESS_QUERY_LIMITED_INFORMATION.
|
||||
processQueryLimitedInfoAccess = windows.PROCESS_QUERY_LIMITED_INFORMATION
|
||||
|
||||
// bootTime is the time when the OS was last booted. This value may be nil
|
||||
// on operating systems that do not support the WMI query used to obtain it.
|
||||
bootTime *time.Time
|
||||
bootTimeLock sync.Mutex
|
||||
)
|
||||
|
||||
func init() {
|
||||
if !version.IsWindowsVistaOrGreater() {
|
||||
// PROCESS_QUERY_LIMITED_INFORMATION cannot be used on 2003 or XP.
|
||||
processQueryLimitedInfoAccess = syscall.PROCESS_QUERY_INFORMATION
|
||||
}
|
||||
}
|
||||
|
||||
func (self *LoadAverage) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *FDUsage) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *ProcEnv) Get(pid int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *ProcExe) Get(pid int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *ProcFDUsage) Get(pid int) error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *Uptime) Get() error {
|
||||
// Minimum supported OS is Windows Vista.
|
||||
if !version.IsWindowsVistaOrGreater() {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
bootTimeLock.Lock()
|
||||
defer bootTimeLock.Unlock()
|
||||
if bootTime == nil {
|
||||
uptime, err := windows.GetTickCount64()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get boot time using win32 api")
|
||||
}
|
||||
var boot = time.Unix(int64(uptime), 0)
|
||||
bootTime = &boot
|
||||
}
|
||||
|
||||
self.Length = time.Since(*bootTime).Seconds()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Mem) Get() error {
|
||||
memoryStatusEx, err := windows.GlobalMemoryStatusEx()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "GlobalMemoryStatusEx failed")
|
||||
}
|
||||
|
||||
self.Total = memoryStatusEx.TotalPhys
|
||||
self.Free = memoryStatusEx.AvailPhys
|
||||
self.Used = self.Total - self.Free
|
||||
self.ActualFree = self.Free
|
||||
self.ActualUsed = self.Used
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Swap) Get() error {
|
||||
memoryStatusEx, err := windows.GlobalMemoryStatusEx()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "GlobalMemoryStatusEx failed")
|
||||
}
|
||||
|
||||
self.Total = memoryStatusEx.TotalPageFile
|
||||
self.Free = memoryStatusEx.AvailPageFile
|
||||
self.Used = self.Total - self.Free
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *HugeTLBPages) Get() error {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
func (self *Cpu) Get() error {
|
||||
idle, kernel, user, err := windows.GetSystemTimes()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "GetSystemTimes failed")
|
||||
}
|
||||
|
||||
// CPU times are reported in milliseconds by gosigar.
|
||||
self.Idle = uint64(idle / time.Millisecond)
|
||||
self.Sys = uint64(kernel / time.Millisecond)
|
||||
self.User = uint64(user / time.Millisecond)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *CpuList) Get() error {
|
||||
cpus, err := windows.NtQuerySystemProcessorPerformanceInformation()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "NtQuerySystemProcessorPerformanceInformation failed")
|
||||
}
|
||||
|
||||
self.List = make([]Cpu, 0, len(cpus))
|
||||
for _, cpu := range cpus {
|
||||
self.List = append(self.List, Cpu{
|
||||
Idle: uint64(cpu.IdleTime / time.Millisecond),
|
||||
Sys: uint64(cpu.KernelTime / time.Millisecond),
|
||||
User: uint64(cpu.UserTime / time.Millisecond),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *FileSystemList) Get() error {
|
||||
drives, err := windows.GetAccessPaths()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "GetAccessPaths failed")
|
||||
}
|
||||
|
||||
for _, drive := range drives {
|
||||
dt, err := windows.GetDriveType(drive)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "GetDriveType failed")
|
||||
}
|
||||
|
||||
self.List = append(self.List, FileSystem{
|
||||
DirName: drive,
|
||||
DevName: drive,
|
||||
TypeName: dt.String(),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves a list of all process identifiers (PIDs) in the system.
|
||||
func (self *ProcList) Get() error {
|
||||
pids, err := windows.EnumProcesses()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "EnumProcesses failed")
|
||||
}
|
||||
|
||||
// Convert uint32 PIDs to int.
|
||||
self.List = make([]int, 0, len(pids))
|
||||
for _, pid := range pids {
|
||||
self.List = append(self.List, int(pid))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcState) Get(pid int) error {
|
||||
var errs []error
|
||||
|
||||
var err error
|
||||
self.Name, err = getProcName(pid)
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrap(err, "getProcName failed"))
|
||||
}
|
||||
|
||||
self.State, err = getProcStatus(pid)
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrap(err, "getProcStatus failed"))
|
||||
}
|
||||
|
||||
self.Ppid, err = getParentPid(pid)
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrap(err, "getParentPid failed"))
|
||||
}
|
||||
|
||||
// getProcCredName will often fail when run as a non-admin user. This is
|
||||
// caused by strict ACL of the process token belonging to other users.
|
||||
// Instead of failing completely, ignore this error and still return most
|
||||
// data with an empty Username.
|
||||
self.Username, _ = getProcCredName(pid)
|
||||
|
||||
if len(errs) > 0 {
|
||||
errStrs := make([]string, 0, len(errs))
|
||||
for _, e := range errs {
|
||||
errStrs = append(errStrs, e.Error())
|
||||
}
|
||||
return errors.New(strings.Join(errStrs, "; "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getProcName returns the process name associated with the PID.
|
||||
func getProcName(pid int) (string, error) {
|
||||
handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess, false, uint32(pid))
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "OpenProcess failed for pid=%v", pid)
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
|
||||
filename, err := windows.GetProcessImageFileName(handle)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "GetProcessImageFileName failed for pid=%v", pid)
|
||||
}
|
||||
|
||||
return filepath.Base(filename), nil
|
||||
}
|
||||
|
||||
// getProcStatus returns the status of a process.
|
||||
func getProcStatus(pid int) (RunState, error) {
|
||||
handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess, false, uint32(pid))
|
||||
if err != nil {
|
||||
return RunStateUnknown, errors.Wrapf(err, "OpenProcess failed for pid=%v", pid)
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
|
||||
var exitCode uint32
|
||||
err = syscall.GetExitCodeProcess(handle, &exitCode)
|
||||
if err != nil {
|
||||
return RunStateUnknown, errors.Wrapf(err, "GetExitCodeProcess failed for pid=%v", pid)
|
||||
}
|
||||
|
||||
if exitCode == 259 { //still active
|
||||
return RunStateRun, nil
|
||||
}
|
||||
return RunStateSleep, nil
|
||||
}
|
||||
|
||||
// getParentPid returns the parent process ID of a process.
|
||||
func getParentPid(pid int) (int, error) {
|
||||
handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess, false, uint32(pid))
|
||||
if err != nil {
|
||||
return RunStateUnknown, errors.Wrapf(err, "OpenProcess failed for pid=%v", pid)
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
|
||||
procInfo, err := windows.NtQueryProcessBasicInformation(handle)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "NtQueryProcessBasicInformation failed for pid=%v", pid)
|
||||
}
|
||||
|
||||
return int(procInfo.InheritedFromUniqueProcessID), nil
|
||||
}
|
||||
|
||||
func getProcCredName(pid int) (string, error) {
|
||||
handle, err := syscall.OpenProcess(syscall.PROCESS_QUERY_INFORMATION, false, uint32(pid))
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "OpenProcess failed for pid=%v", pid)
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
|
||||
// Find process token via win32.
|
||||
var token syscall.Token
|
||||
err = syscall.OpenProcessToken(handle, syscall.TOKEN_QUERY, &token)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "OpenProcessToken failed for pid=%v", pid)
|
||||
}
|
||||
// Close token to prevent handle leaks.
|
||||
defer token.Close()
|
||||
|
||||
// Find the token user.
|
||||
tokenUser, err := token.GetTokenUser()
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "GetTokenInformation failed for pid=%v", pid)
|
||||
}
|
||||
|
||||
// Look up domain account by SID.
|
||||
account, domain, _, err := tokenUser.User.Sid.LookupAccount("")
|
||||
if err != nil {
|
||||
sid, sidErr := tokenUser.User.Sid.String()
|
||||
if sidErr != nil {
|
||||
return "", errors.Wrapf(err, "failed while looking up account name for pid=%v", pid)
|
||||
}
|
||||
return "", errors.Wrapf(err, "failed while looking up account name for SID=%v of pid=%v", sid, pid)
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`%s\%s`, domain, account), nil
|
||||
}
|
||||
|
||||
func (self *ProcMem) Get(pid int) error {
|
||||
handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess|windows.PROCESS_VM_READ, false, uint32(pid))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "OpenProcess failed for pid=%v", pid)
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
|
||||
counters, err := windows.GetProcessMemoryInfo(handle)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "GetProcessMemoryInfo failed for pid=%v", pid)
|
||||
}
|
||||
|
||||
self.Resident = uint64(counters.WorkingSetSize)
|
||||
self.Size = uint64(counters.PrivateUsage)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *ProcTime) Get(pid int) error {
|
||||
cpu, err := getProcTimes(pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Windows epoch times are expressed as time elapsed since midnight on
|
||||
// January 1, 1601 at Greenwich, England. This converts the Filetime to
|
||||
// unix epoch in milliseconds.
|
||||
self.StartTime = uint64(cpu.CreationTime.Nanoseconds() / 1e6)
|
||||
|
||||
// Convert to millis.
|
||||
self.User = uint64(windows.FiletimeToDuration(&cpu.UserTime).Nanoseconds() / 1e6)
|
||||
self.Sys = uint64(windows.FiletimeToDuration(&cpu.KernelTime).Nanoseconds() / 1e6)
|
||||
self.Total = self.User + self.Sys
|
||||
|
||||
return nil
|
||||
}
|
||||
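syscall.Filetime counts 100-nanosecond intervals since 1601-01-01, and its Nanoseconds method already rebases that onto the Unix epoch, so the divisions by 1e6 above are all that is left to get milliseconds. A sketch of the StartTime conversion (Windows-only; filetimeToUnixMillis is not part of the deleted file):

// filetimeToUnixMillis mirrors the StartTime conversion above: nanoseconds
// since the Unix epoch divided down to milliseconds.
func filetimeToUnixMillis(ft syscall.Filetime) uint64 {
	return uint64(ft.Nanoseconds() / 1e6)
}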
|
||||
func getProcTimes(pid int) (*syscall.Rusage, error) {
|
||||
handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess, false, uint32(pid))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "OpenProcess failed for pid=%v", pid)
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
|
||||
var cpu syscall.Rusage
|
||||
if err := syscall.GetProcessTimes(handle, &cpu.CreationTime, &cpu.ExitTime, &cpu.KernelTime, &cpu.UserTime); err != nil {
|
||||
return nil, errors.Wrapf(err, "GetProcessTimes failed for pid=%v", pid)
|
||||
}
|
||||
|
||||
return &cpu, nil
|
||||
}
|
||||
|
||||
func (self *ProcArgs) Get(pid int) error {
|
||||
// The minimum supported client for Win32_Process is Windows Vista.
|
||||
if !version.IsWindowsVistaOrGreater() {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess|windows.PROCESS_VM_READ, false, uint32(pid))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "OpenProcess failed for pid=%v", pid)
|
||||
}
|
||||
defer syscall.CloseHandle(handle)
|
||||
pbi, err := windows.NtQueryProcessBasicInformation(handle)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "NtQueryProcessBasicInformation failed for pid=%v", pid)
|
||||
}
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
userProcParams, err := windows.GetUserProcessParams(handle, pbi)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if argsW, err := windows.ReadProcessUnicodeString(handle, &userProcParams.CommandLine); err == nil {
|
||||
self.List, err = windows.ByteSliceToStringSlice(argsW)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *FileSystemUsage) Get(path string) error {
|
||||
freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes, err := windows.GetDiskFreeSpaceEx(path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "GetDiskFreeSpaceEx failed")
|
||||
}
|
||||
|
||||
self.Total = totalNumberOfBytes
|
||||
self.Free = totalNumberOfFreeBytes
|
||||
self.Used = self.Total - self.Free
|
||||
self.Avail = freeBytesAvailable
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Rusage) Get(who int) error {
|
||||
if who != 0 {
|
||||
return ErrNotImplemented{runtime.GOOS}
|
||||
}
|
||||
|
||||
pid := os.Getpid()
|
||||
cpu, err := getProcTimes(pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Utime = windows.FiletimeToDuration(&cpu.UserTime)
|
||||
self.Stime = windows.FiletimeToDuration(&cpu.KernelTime)
|
||||
|
||||
return nil
|
||||
}
|
2
vendor/github.com/elastic/gosigar/sys/windows/doc.go
generated
vendored
@ -1,2 +0,0 @@
|
||||
// Package windows contains various Windows system calls.
|
||||
package windows
|
132
vendor/github.com/elastic/gosigar/sys/windows/ntquery.go
generated
vendored
@ -1,132 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package windows
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// On both 32-bit and 64-bit systems NtQuerySystemInformation expects the
|
||||
// size of SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION to be 48.
|
||||
const sizeofSystemProcessorPerformanceInformation = 48
|
||||
|
||||
// ProcessBasicInformation is an equivalent representation of
|
||||
// PROCESS_BASIC_INFORMATION in the Windows API.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684280(v=vs.85).aspx
|
||||
type ProcessBasicInformation struct {
|
||||
ExitStatus uint
|
||||
PebBaseAddress uintptr
|
||||
AffinityMask uint
|
||||
BasePriority uint
|
||||
UniqueProcessID uint
|
||||
InheritedFromUniqueProcessID uint
|
||||
}
|
||||
|
||||
// NtQueryProcessBasicInformation queries basic information about the process
|
||||
// associated with the given handle (provided by OpenProcess). It uses the
|
||||
// NtQueryInformationProcess function to collect the data.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684280(v=vs.85).aspx
|
||||
func NtQueryProcessBasicInformation(handle syscall.Handle) (ProcessBasicInformation, error) {
|
||||
var processBasicInfo ProcessBasicInformation
|
||||
processBasicInfoPtr := (*byte)(unsafe.Pointer(&processBasicInfo))
|
||||
size := uint32(unsafe.Sizeof(processBasicInfo))
|
||||
ntStatus, _ := _NtQueryInformationProcess(handle, 0, processBasicInfoPtr, size, nil)
|
||||
if ntStatus != 0 {
|
||||
return ProcessBasicInformation{}, errors.Errorf("NtQueryInformationProcess failed, NTSTATUS=0x%X", ntStatus)
|
||||
}
|
||||
|
||||
return processBasicInfo, nil
|
||||
}
|
||||
|
||||
// SystemProcessorPerformanceInformation contains CPU performance information
|
||||
// for a single CPU.
|
||||
type SystemProcessorPerformanceInformation struct {
|
||||
IdleTime time.Duration // Amount of time spent idle.
|
||||
KernelTime time.Duration // Kernel time does NOT include time spent in idle.
|
||||
UserTime time.Duration // Amount of time spent executing in user mode.
|
||||
}
|
||||
|
||||
// _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION is an equivalent representation of
|
||||
// SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION in the Windows API. This struct is
|
||||
// used internally with NtQuerySystemInformation call and is not exported. The
|
||||
// exported equivalent is SystemProcessorPerformanceInformation.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724509(v=vs.85).aspx
|
||||
type _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION struct {
|
||||
IdleTime int64
|
||||
KernelTime int64
|
||||
UserTime int64
|
||||
Reserved1 [2]int64
|
||||
Reserved2 uint32
|
||||
}
|
||||
|
||||
// NtQuerySystemProcessorPerformanceInformation queries CPU performance
|
||||
// information for each CPU. It uses the NtQuerySystemInformation function to
|
||||
// collect the SystemProcessorPerformanceInformation.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724509(v=vs.85).aspx
|
||||
func NtQuerySystemProcessorPerformanceInformation() ([]SystemProcessorPerformanceInformation, error) {
|
||||
// NTSTATUS code for success.
|
||||
// https://msdn.microsoft.com/en-us/library/cc704588.aspx
|
||||
const STATUS_SUCCESS = 0
|
||||
|
||||
// From the _SYSTEM_INFORMATION_CLASS enum.
|
||||
// http://processhacker.sourceforge.net/doc/ntexapi_8h.html#ad5d815b48e8f4da1ef2eb7a2f18a54e0
|
||||
const systemProcessorPerformanceInformation = 8
|
||||
|
||||
// Create a buffer large enough to hold an entry for each processor.
|
||||
b := make([]byte, runtime.NumCPU()*sizeofSystemProcessorPerformanceInformation)
|
||||
|
||||
// Query the performance information. Note that this function uses 0 to
|
||||
// indicate success. Most other Windows functions use non-zero for success.
|
||||
var returnLength uint32
|
||||
ntStatus, _ := _NtQuerySystemInformation(systemProcessorPerformanceInformation, &b[0], uint32(len(b)), &returnLength)
|
||||
if ntStatus != STATUS_SUCCESS {
|
||||
return nil, errors.Errorf("NtQuerySystemInformation failed, NTSTATUS=0x%X, bufLength=%v, returnLength=%v", ntStatus, len(b), returnLength)
|
||||
}
|
||||
|
||||
return readSystemProcessorPerformanceInformationBuffer(b)
|
||||
}
|
||||
|
||||
// readSystemProcessorPerformanceInformationBuffer reads from a buffer
|
||||
// containing SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION data. The buffer should
|
||||
// contain one entry for each CPU.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724509(v=vs.85).aspx
|
||||
func readSystemProcessorPerformanceInformationBuffer(b []byte) ([]SystemProcessorPerformanceInformation, error) {
|
||||
n := len(b) / sizeofSystemProcessorPerformanceInformation
|
||||
r := bytes.NewReader(b)
|
||||
|
||||
rtn := make([]SystemProcessorPerformanceInformation, 0, n)
|
||||
for i := 0; i < n; i++ {
|
||||
_, err := r.Seek(int64(i*sizeofSystemProcessorPerformanceInformation), io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to seek to cpuN=%v in buffer", i)
|
||||
}
|
||||
|
||||
times := make([]uint64, 3)
|
||||
for j := range times {
|
||||
err := binary.Read(r, binary.LittleEndian, &times[j])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed reading cpu times for cpuN=%v", i)
|
||||
}
|
||||
}
|
||||
|
||||
idleTime := time.Duration(times[0] * 100)
|
||||
kernelTime := time.Duration(times[1] * 100)
|
||||
userTime := time.Duration(times[2] * 100)
|
||||
|
||||
rtn = append(rtn, SystemProcessorPerformanceInformation{
|
||||
IdleTime: idleTime,
|
||||
KernelTime: kernelTime - idleTime, // Subtract out idle time from kernel time.
|
||||
UserTime: userTime,
|
||||
})
|
||||
}
|
||||
|
||||
return rtn, nil
|
||||
}
|
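The * 100 factors above exist because the kernel reports these counters in 100-nanosecond units, while time.Duration counts nanoseconds. The conversion on its own:

package main

import (
	"fmt"
	"time"
)

// fromHundredNanos converts a count of 100 ns intervals (the unit used by
// SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION and by FILETIME) to a Duration.
func fromHundredNanos(n uint64) time.Duration {
	return time.Duration(n * 100)
}

func main() {
	fmt.Println(fromHundredNanos(10000000)) // 1s
}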
272
vendor/github.com/elastic/gosigar/sys/windows/privileges.go
generated
vendored
@ -1,272 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package windows
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Cache of privilege names to LUIDs.
|
||||
var (
|
||||
privNames = make(map[string]int64)
|
||||
privNameMutex sync.Mutex
|
||||
)
|
||||
|
||||
const (
|
||||
// SeDebugPrivilege is the name of the privilege used to debug programs.
|
||||
SeDebugPrivilege = "SeDebugPrivilege"
|
||||
)
|
||||
|
||||
// Errors returned by AdjustTokenPrivileges.
|
||||
const (
|
||||
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
|
||||
)
|
||||
|
||||
// Attribute bits for privileges.
|
||||
const (
|
||||
_SE_PRIVILEGE_ENABLED_BY_DEFAULT uint32 = 0x00000001
|
||||
_SE_PRIVILEGE_ENABLED uint32 = 0x00000002
|
||||
_SE_PRIVILEGE_REMOVED uint32 = 0x00000004
|
||||
_SE_PRIVILEGE_USED_FOR_ACCESS uint32 = 0x80000000
|
||||
)
|
||||
|
||||
// Privilege contains information about a single privilege associated with a
|
||||
// Token.
|
||||
type Privilege struct {
|
||||
LUID int64 `json:"-"` // Locally unique identifier (guaranteed only until the system is restarted).
|
||||
Name string `json:"-"`
|
||||
EnabledByDefault bool `json:"enabled_by_default,omitempty"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Removed bool `json:"removed,omitempty"`
|
||||
Used bool `json:"used,omitempty"`
|
||||
}
|
||||
|
||||
func (p Privilege) String() string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(p.Name)
|
||||
buf.WriteString("=(")
|
||||
|
||||
opts := make([]string, 0, 4)
|
||||
if p.EnabledByDefault {
|
||||
opts = append(opts, "Default")
|
||||
}
|
||||
if p.Enabled {
|
||||
opts = append(opts, "Enabled")
|
||||
}
|
||||
if !p.EnabledByDefault && !p.Enabled {
|
||||
opts = append(opts, "Disabled")
|
||||
}
|
||||
if p.Removed {
|
||||
opts = append(opts, "Removed")
|
||||
}
|
||||
if p.Used {
|
||||
opts = append(opts, "Used")
|
||||
}
|
||||
|
||||
buf.WriteString(strings.Join(opts, ", "))
|
||||
buf.WriteString(")")
|
||||
|
||||
// Example: SeDebugPrivilege=(Default, Enabled)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// User represents the information about a Windows account.
|
||||
type User struct {
|
||||
SID string
|
||||
Account string
|
||||
Domain string
|
||||
Type uint32
|
||||
}
|
||||
|
||||
func (u User) String() string {
|
||||
return fmt.Sprintf(`User:%v\%v, SID:%v, Type:%v`, u.Domain, u.Account, u.SID, u.Type)
|
||||
}
|
||||
|
||||
// DebugInfo contains general debug info about the current process.
|
||||
type DebugInfo struct {
|
||||
OSVersion Version // OS version info.
|
||||
Arch string // Architecture of the machine.
|
||||
NumCPU int // Number of CPUs.
|
||||
User User // User that this process is running as.
|
||||
ProcessPrivs map[string]Privilege // Privileges held by the process.
|
||||
}
|
||||
|
||||
func (d DebugInfo) String() string {
|
||||
bytes, _ := json.Marshal(d)
|
||||
return string(bytes)
|
||||
}
|
||||
|
||||
// LookupPrivilegeName looks up a privilege name given a LUID value.
|
||||
func LookupPrivilegeName(systemName string, luid int64) (string, error) {
|
||||
buf := make([]uint16, 256)
|
||||
bufSize := uint32(len(buf))
|
||||
err := _LookupPrivilegeName(systemName, &luid, &buf[0], &bufSize)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "LookupPrivilegeName failed for luid=%v", luid)
|
||||
}
|
||||
|
||||
return syscall.UTF16ToString(buf), nil
|
||||
}
|
||||
|
||||
// mapPrivileges maps privilege names to LUID values.
|
||||
func mapPrivileges(names []string) ([]int64, error) {
|
||||
var privileges []int64
|
||||
privNameMutex.Lock()
|
||||
defer privNameMutex.Unlock()
|
||||
for _, name := range names {
|
||||
p, ok := privNames[name]
|
||||
if !ok {
|
||||
err := _LookupPrivilegeValue("", name, &p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "LookupPrivilegeValue failed on '%v'", name)
|
||||
}
|
||||
privNames[name] = p
|
||||
}
|
||||
privileges = append(privileges, p)
|
||||
}
|
||||
return privileges, nil
|
||||
}
|
||||
|
||||
// EnableTokenPrivileges enables the specified privileges in the given
|
||||
// Token. The token must have TOKEN_ADJUST_PRIVILEGES access. If the token
|
||||
// does not already contain the privilege it cannot be enabled.
|
||||
func EnableTokenPrivileges(token syscall.Token, privileges ...string) error {
|
||||
privValues, err := mapPrivileges(privileges)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
binary.Write(&b, binary.LittleEndian, uint32(len(privValues)))
|
||||
for _, p := range privValues {
|
||||
binary.Write(&b, binary.LittleEndian, p)
|
||||
binary.Write(&b, binary.LittleEndian, uint32(_SE_PRIVILEGE_ENABLED))
|
||||
}
|
||||
|
||||
success, err := _AdjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(b.Len()), nil, nil)
|
||||
if !success {
|
||||
return err
|
||||
}
|
||||
if err == ERROR_NOT_ALL_ASSIGNED {
|
||||
return errors.Wrap(err, "error not all privileges were assigned")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
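The buffer assembled above is a hand-rolled TOKEN_PRIVILEGES structure: a little-endian uint32 count followed by one {LUID int64, attributes uint32} pair per privilege. A runnable illustration of that layout (the LUID value is made up):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	const _SE_PRIVILEGE_ENABLED uint32 = 0x00000002

	var b bytes.Buffer
	privileges := []int64{20} // hypothetical LUID for a single privilege

	// Count, then one LUID/attributes pair per privilege, as above.
	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
	for _, luid := range privileges {
		binary.Write(&b, binary.LittleEndian, luid)
		binary.Write(&b, binary.LittleEndian, _SE_PRIVILEGE_ENABLED)
	}

	// 4-byte count + 12 bytes per privilege = 16 bytes for one entry.
	fmt.Println(b.Len(), b.Bytes())
}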
|
||||
// GetTokenPrivileges returns a list of privileges associated with a token.
|
||||
// The provided token must have at a minimum TOKEN_QUERY access. This is a
|
||||
// wrapper around the GetTokenInformation function.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa446671(v=vs.85).aspx
|
||||
func GetTokenPrivileges(token syscall.Token) (map[string]Privilege, error) {
|
||||
// Determine the required buffer size.
|
||||
var size uint32
|
||||
syscall.GetTokenInformation(token, syscall.TokenPrivileges, nil, 0, &size)
|
||||
|
||||
// This buffer will receive a TOKEN_PRIVILEGE structure.
|
||||
b := bytes.NewBuffer(make([]byte, size))
|
||||
err := syscall.GetTokenInformation(token, syscall.TokenPrivileges, &b.Bytes()[0], uint32(b.Len()), &size)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "GetTokenInformation failed")
|
||||
}
|
||||
|
||||
var privilegeCount uint32
|
||||
err = binary.Read(b, binary.LittleEndian, &privilegeCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read PrivilegeCount")
|
||||
}
|
||||
|
||||
rtn := make(map[string]Privilege, privilegeCount)
|
||||
for i := 0; i < int(privilegeCount); i++ {
|
||||
var luid int64
|
||||
err = binary.Read(b, binary.LittleEndian, &luid)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read LUID value")
|
||||
}
|
||||
|
||||
var attributes uint32
|
||||
err = binary.Read(b, binary.LittleEndian, &attributes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read attributes")
|
||||
}
|
||||
|
||||
name, err := LookupPrivilegeName("", luid)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "LookupPrivilegeName failed for LUID=%v", luid)
|
||||
}
|
||||
|
||||
rtn[name] = Privilege{
|
||||
LUID: luid,
|
||||
Name: name,
|
||||
EnabledByDefault: (attributes & _SE_PRIVILEGE_ENABLED_BY_DEFAULT) > 0,
|
||||
Enabled: (attributes & _SE_PRIVILEGE_ENABLED) > 0,
|
||||
Removed: (attributes & _SE_PRIVILEGE_REMOVED) > 0,
|
||||
Used: (attributes & _SE_PRIVILEGE_USED_FOR_ACCESS) > 0,
|
||||
}
|
||||
}
|
||||
|
||||
return rtn, nil
|
||||
}
|
||||
|
||||
// GetTokenUser returns the User associated with the given Token.
|
||||
func GetTokenUser(token syscall.Token) (User, error) {
|
||||
tokenUser, err := token.GetTokenUser()
|
||||
if err != nil {
|
||||
return User{}, errors.Wrap(err, "GetTokenUser failed")
|
||||
}
|
||||
|
||||
var user User
|
||||
user.SID, err = tokenUser.User.Sid.String()
|
||||
if err != nil {
|
||||
return user, errors.Wrap(err, "ConvertSidToStringSid failed")
|
||||
}
|
||||
|
||||
user.Account, user.Domain, user.Type, err = tokenUser.User.Sid.LookupAccount("")
|
||||
if err != nil {
|
||||
return user, errors.Wrap(err, "LookupAccountSid failed")
|
||||
}
|
||||
|
||||
return user, nil
|
||||
}
|
||||
|
||||
// GetDebugInfo returns general debug info about the current process.
|
||||
func GetDebugInfo() (*DebugInfo, error) {
|
||||
h, err := windows.GetCurrentProcess()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var token syscall.Token
|
||||
err = syscall.OpenProcessToken(syscall.Handle(h), syscall.TOKEN_QUERY, &token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
privs, err := GetTokenPrivileges(token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
user, err := GetTokenUser(token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &DebugInfo{
|
||||
User: user,
|
||||
ProcessPrivs: privs,
|
||||
OSVersion: GetWindowsVersion(),
|
||||
Arch: runtime.GOARCH,
|
||||
NumCPU: runtime.NumCPU(),
|
||||
}, nil
|
||||
}
|
610
vendor/github.com/elastic/gosigar/sys/windows/syscall_windows.go
generated
vendored
@ -1,610 +0,0 @@
|
||||
package windows
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
sizeofUint32 = 4
|
||||
sizeofProcessEntry32 = uint32(unsafe.Sizeof(ProcessEntry32{}))
|
||||
sizeofProcessMemoryCountersEx = uint32(unsafe.Sizeof(ProcessMemoryCountersEx{}))
|
||||
sizeofMemoryStatusEx = uint32(unsafe.Sizeof(MemoryStatusEx{}))
|
||||
)
|
||||
|
||||
// Process-specific access rights. Others are declared in the syscall package.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684880(v=vs.85).aspx
|
||||
const (
|
||||
PROCESS_QUERY_LIMITED_INFORMATION uint32 = 0x1000
|
||||
PROCESS_VM_READ uint32 = 0x0010
|
||||
)
|
||||
|
||||
// SizeOfRtlUserProcessParameters gives the size
|
||||
// of the RtlUserProcessParameters struct.
|
||||
const SizeOfRtlUserProcessParameters = unsafe.Sizeof(RtlUserProcessParameters{})
|
||||
|
||||
// MAX_PATH is the maximum length for a path in Windows.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
|
||||
const MAX_PATH = 260
|
||||
|
||||
// DriveType represents a type of drive (removable, fixed, CD-ROM, RAM disk, or
|
||||
// network drive).
|
||||
type DriveType uint32
|
||||
|
||||
// Drive types as returned by GetDriveType.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364939(v=vs.85).aspx
|
||||
const (
|
||||
DRIVE_UNKNOWN DriveType = iota
|
||||
DRIVE_NO_ROOT_DIR
|
||||
DRIVE_REMOVABLE
|
||||
DRIVE_FIXED
|
||||
DRIVE_REMOTE
|
||||
DRIVE_CDROM
|
||||
DRIVE_RAMDISK
|
||||
)
|
||||
|
||||
// UnicodeString is Go's equivalent for the _UNICODE_STRING struct.
|
||||
type UnicodeString struct {
|
||||
Size uint16
|
||||
MaximumLength uint16
|
||||
Buffer uintptr
|
||||
}
|
||||
|
||||
// RtlUserProcessParameters is Go's equivalent for the
|
||||
// _RTL_USER_PROCESS_PARAMETERS struct.
|
||||
// A few undocumented fields are exposed.
|
||||
type RtlUserProcessParameters struct {
|
||||
Reserved1 [16]byte
|
||||
Reserved2 [5]uintptr
|
||||
CurrentDirectoryPath UnicodeString
|
||||
CurrentDirectoryHandle uintptr
|
||||
DllPath UnicodeString
|
||||
ImagePathName UnicodeString
|
||||
CommandLine UnicodeString
|
||||
}
|
||||
|
||||
func (dt DriveType) String() string {
|
||||
names := map[DriveType]string{
|
||||
DRIVE_UNKNOWN: "unknown",
|
||||
DRIVE_NO_ROOT_DIR: "invalid",
|
||||
DRIVE_REMOVABLE: "removable",
|
||||
DRIVE_FIXED: "fixed",
|
||||
DRIVE_REMOTE: "remote",
|
||||
DRIVE_CDROM: "cdrom",
|
||||
DRIVE_RAMDISK: "ramdisk",
|
||||
}
|
||||
|
||||
name, found := names[dt]
|
||||
if !found {
|
||||
return "unknown DriveType value"
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// Flags that can be used with CreateToolhelp32Snapshot.
|
||||
const (
|
||||
TH32CS_INHERIT uint32 = 0x80000000 // Indicates that the snapshot handle is to be inheritable.
|
||||
TH32CS_SNAPHEAPLIST uint32 = 0x00000001 // Includes all heaps of the process specified in th32ProcessID in the snapshot.
|
||||
TH32CS_SNAPMODULE uint32 = 0x00000008 // Includes all modules of the process specified in th32ProcessID in the snapshot.
|
||||
TH32CS_SNAPMODULE32 uint32 = 0x00000010 // Includes all 32-bit modules of the process specified in th32ProcessID in the snapshot when called from a 64-bit process.
|
||||
TH32CS_SNAPPROCESS uint32 = 0x00000002 // Includes all processes in the system in the snapshot.
|
||||
TH32CS_SNAPTHREAD uint32 = 0x00000004 // Includes all threads in the system in the snapshot.
|
||||
)
|
||||
|
||||
// ProcessEntry32 is an equivalent representation of PROCESSENTRY32 in the
|
||||
// Windows API. It contains a process's information. Do not modify or reorder.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684839(v=vs.85).aspx
|
||||
type ProcessEntry32 struct {
|
||||
size uint32
|
||||
CntUsage uint32
|
||||
ProcessID uint32
|
||||
DefaultHeapID uintptr
|
||||
ModuleID uint32
|
||||
CntThreads uint32
|
||||
ParentProcessID uint32
|
||||
PriorityClassBase int32
|
||||
Flags uint32
|
||||
exeFile [MAX_PATH]uint16
|
||||
}
|
||||
|
||||
// ExeFile returns the name of the executable file for the process. It does
|
||||
// not contain the full path.
|
||||
func (p ProcessEntry32) ExeFile() string {
|
||||
return syscall.UTF16ToString(p.exeFile[:])
|
||||
}
|
||||
|
||||
func (p ProcessEntry32) String() string {
|
||||
return fmt.Sprintf("{CntUsage:%v ProcessID:%v DefaultHeapID:%v ModuleID:%v "+
|
||||
"CntThreads:%v ParentProcessID:%v PriorityClassBase:%v Flags:%v ExeFile:%v",
|
||||
p.CntUsage, p.ProcessID, p.DefaultHeapID, p.ModuleID, p.CntThreads,
|
||||
p.ParentProcessID, p.PriorityClassBase, p.Flags, p.ExeFile())
|
||||
}
|
||||
|
||||
// MemoryStatusEx is an equivalent representation of MEMORYSTATUSEX in the
|
||||
// Windows API. It contains information about the current state of both physical
|
||||
// and virtual memory, including extended memory.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770
|
||||
type MemoryStatusEx struct {
|
||||
length uint32
|
||||
MemoryLoad uint32
|
||||
TotalPhys uint64
|
||||
AvailPhys uint64
|
||||
TotalPageFile uint64
|
||||
AvailPageFile uint64
|
||||
TotalVirtual uint64
|
||||
AvailVirtual uint64
|
||||
AvailExtendedVirtual uint64
|
||||
}
|
||||
|
||||
// ProcessMemoryCountersEx is an equivalent representation of
|
||||
// PROCESS_MEMORY_COUNTERS_EX in the Windows API.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684874(v=vs.85).aspx
|
||||
type ProcessMemoryCountersEx struct {
|
||||
cb uint32
|
||||
PageFaultCount uint32
|
||||
PeakWorkingSetSize uintptr
|
||||
WorkingSetSize uintptr
|
||||
QuotaPeakPagedPoolUsage uintptr
|
||||
QuotaPagedPoolUsage uintptr
|
||||
QuotaPeakNonPagedPoolUsage uintptr
|
||||
QuotaNonPagedPoolUsage uintptr
|
||||
PagefileUsage uintptr
|
||||
PeakPagefileUsage uintptr
|
||||
PrivateUsage uintptr
|
||||
}
|
||||
|
||||
// GetLogicalDriveStrings returns a list of drives in the system.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364975(v=vs.85).aspx
|
||||
func GetLogicalDriveStrings() ([]string, error) {
|
||||
// Determine the size of the buffer required to receive all drives.
|
||||
bufferLength, err := _GetLogicalDriveStringsW(0, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "GetLogicalDriveStringsW failed to get buffer length")
|
||||
}
|
||||
if bufferLength < 0 {
|
||||
return nil, errors.New("GetLogicalDriveStringsW returned an invalid buffer length")
|
||||
}
|
||||
|
||||
buffer := make([]uint16, bufferLength)
|
||||
_, err = _GetLogicalDriveStringsW(uint32(len(buffer)), &buffer[0])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "GetLogicalDriveStringsW failed")
|
||||
}
|
||||
|
||||
return UTF16SliceToStringSlice(buffer), nil
|
||||
}
|
||||
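// driveTypesExample is an illustrative usage sketch: it lists the logical
// drives returned by GetLogicalDriveStrings and looks up each one's type via
// the GetDriveType wrapper defined below. The drive strings already carry the
// trailing backslash that GetDriveType requires.
func driveTypesExample() error {
	drives, err := GetLogicalDriveStrings()
	if err != nil {
		return err
	}
	for _, drive := range drives {
		dt, err := GetDriveType(drive)
		if err != nil {
			return err
		}
		fmt.Printf("%s -> %v\n", drive, dt)
	}
	return nil
}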
|
||||
// GetAccessPaths returns the list of access paths for volumes in the system.
|
||||
func GetAccessPaths() ([]string, error) {
|
||||
volumes, err := GetVolumes()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "GetVolumes failed")
|
||||
}
|
||||
|
||||
var paths []string
|
||||
for _, volumeName := range volumes {
|
||||
volumePaths, err := GetVolumePathsForVolume(volumeName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get list of access paths for volume '%s'", volumeName)
|
||||
}
|
||||
if len(volumePaths) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get only the first path
|
||||
paths = append(paths, volumePaths[0])
|
||||
}
|
||||
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
// GetVolumes returns the list of volumes in the system.
|
||||
// https://docs.microsoft.com/es-es/windows/desktop/api/fileapi/nf-fileapi-findfirstvolumew
|
||||
func GetVolumes() ([]string, error) {
|
||||
buffer := make([]uint16, MAX_PATH+1)
|
||||
|
||||
var volumes []string
|
||||
|
||||
h, err := _FindFirstVolume(&buffer[0], uint32(len(buffer)))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "FindFirstVolumeW failed")
|
||||
}
|
||||
defer _FindVolumeClose(h)
|
||||
|
||||
for {
|
||||
volumes = append(volumes, syscall.UTF16ToString(buffer))
|
||||
|
||||
err = _FindNextVolume(h, &buffer[0], uint32(len(buffer)))
|
||||
if err != nil {
|
||||
if errors.Cause(err) == syscall.ERROR_NO_MORE_FILES {
|
||||
break
|
||||
}
|
||||
return nil, errors.Wrap(err, "FindNextVolumeW failed")
|
||||
}
|
||||
}
|
||||
|
||||
return volumes, nil
|
||||
}
|
||||
|
||||
// GetVolumePathsForVolume returns the list of volume paths for a volume.
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/api/FileAPI/nf-fileapi-getvolumepathnamesforvolumenamew
|
||||
func GetVolumePathsForVolume(volumeName string) ([]string, error) {
|
||||
var length uint32
|
||||
err := _GetVolumePathNamesForVolumeName(volumeName, nil, 0, &length)
|
||||
if errors.Cause(err) != syscall.ERROR_MORE_DATA {
|
||||
return nil, errors.Wrap(err, "GetVolumePathNamesForVolumeNameW failed to get needed buffer length")
|
||||
}
|
||||
if length == 0 {
|
||||
// Not mounted, no paths, that's ok
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
buffer := make([]uint16, length*(MAX_PATH+1))
|
||||
err = _GetVolumePathNamesForVolumeName(volumeName, &buffer[0], length, &length)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "GetVolumePathNamesForVolumeNameW failed")
|
||||
}
|
||||
|
||||
return UTF16SliceToStringSlice(buffer), nil
|
||||
}
|
||||
|
||||
// GlobalMemoryStatusEx retrieves information about the system's current usage
|
||||
// of both physical and virtual memory.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
|
||||
func GlobalMemoryStatusEx() (MemoryStatusEx, error) {
|
||||
memoryStatusEx := MemoryStatusEx{length: sizeofMemoryStatusEx}
|
||||
err := _GlobalMemoryStatusEx(&memoryStatusEx)
|
||||
if err != nil {
|
||||
return MemoryStatusEx{}, errors.Wrap(err, "GlobalMemoryStatusEx failed")
|
||||
}
|
||||
|
||||
return memoryStatusEx, nil
|
||||
}
|
||||
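// memoryLoadExample is an illustrative sketch of the wrapper above: it reports
// the current memory load and the total/available physical memory in bytes.
func memoryLoadExample() error {
	m, err := GlobalMemoryStatusEx()
	if err != nil {
		return err
	}
	fmt.Printf("load=%d%% phys total=%d avail=%d\n", m.MemoryLoad, m.TotalPhys, m.AvailPhys)
	return nil
}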
|
||||
// GetProcessMemoryInfo retrieves information about the memory usage of the
|
||||
// specified process.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683219(v=vs.85).aspx
|
||||
func GetProcessMemoryInfo(handle syscall.Handle) (ProcessMemoryCountersEx, error) {
|
||||
processMemoryCountersEx := ProcessMemoryCountersEx{cb: sizeofProcessMemoryCountersEx}
|
||||
err := _GetProcessMemoryInfo(handle, &processMemoryCountersEx, processMemoryCountersEx.cb)
|
||||
if err != nil {
|
||||
return ProcessMemoryCountersEx{}, errors.Wrap(err, "GetProcessMemoryInfo failed")
|
||||
}
|
||||
|
||||
return processMemoryCountersEx, nil
|
||||
}
|
||||
|
||||
// GetProcessImageFileName retrieves the name of the executable file for the
|
||||
// specified process.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683217(v=vs.85).aspx
|
||||
func GetProcessImageFileName(handle syscall.Handle) (string, error) {
|
||||
buffer := make([]uint16, MAX_PATH)
|
||||
_, err := _GetProcessImageFileName(handle, &buffer[0], uint32(len(buffer)))
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "GetProcessImageFileName failed")
|
||||
}
|
||||
|
||||
return syscall.UTF16ToString(buffer), nil
|
||||
}
|
||||
|
||||
// GetSystemTimes retrieves system timing information. On a multiprocessor
|
||||
// system, the values returned are the sum of the designated times across all
|
||||
// processors. The returned kernel time does not include the system idle time.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724400(v=vs.85).aspx
|
||||
func GetSystemTimes() (idle, kernel, user time.Duration, err error) {
|
||||
var idleTime, kernelTime, userTime syscall.Filetime
|
||||
err = _GetSystemTimes(&idleTime, &kernelTime, &userTime)
|
||||
if err != nil {
|
||||
return 0, 0, 0, errors.Wrap(err, "GetSystemTimes failed")
|
||||
}
|
||||
|
||||
idle = FiletimeToDuration(&idleTime)
|
||||
kernel = FiletimeToDuration(&kernelTime) // Kernel time includes idle time so we subtract it out.
|
||||
user = FiletimeToDuration(&userTime)
|
||||
|
||||
return idle, kernel - idle, user, nil
|
||||
}
|
||||
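// cpuBusyPercent is an illustrative sketch of how the values returned by
// GetSystemTimes can be combined: sample twice, diff the counters, and divide
// busy time (kernel without idle, plus user) by total elapsed CPU time. The
// sampling interval is an arbitrary choice made for the example.
func cpuBusyPercent(interval time.Duration) (float64, error) {
	idle1, kernel1, user1, err := GetSystemTimes()
	if err != nil {
		return 0, err
	}
	time.Sleep(interval)
	idle2, kernel2, user2, err := GetSystemTimes()
	if err != nil {
		return 0, err
	}
	// The returned kernel value already has idle time subtracted.
	busy := (kernel2 - kernel1) + (user2 - user1)
	total := busy + (idle2 - idle1)
	if total <= 0 {
		return 0, nil
	}
	return 100 * float64(busy) / float64(total), nil
}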
|
||||
// FiletimeToDuration converts a Filetime to a time.Duration. Do not use this
|
||||
// method to convert a Filetime to an actual clock time, for that use
|
||||
// Filetime.Nanosecond().
|
||||
func FiletimeToDuration(ft *syscall.Filetime) time.Duration {
|
||||
n := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) // in 100-nanosecond intervals
|
||||
return time.Duration(n * 100)
|
||||
}
|
||||
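// filetimeExample is an illustrative sketch of the conversion above: a
// Filetime counts 100-nanosecond ticks, so 10,000,000 ticks equal one second.
func filetimeExample() {
	ft := syscall.Filetime{LowDateTime: 10000000, HighDateTime: 0}
	fmt.Println(FiletimeToDuration(&ft)) // 1s
}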
|
||||
// GetDriveType determines whether a disk drive is a removable, fixed, CD-ROM,
|
||||
// RAM disk, or network drive. A trailing backslash is required on the
|
||||
// rootPathName.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364939
|
||||
func GetDriveType(rootPathName string) (DriveType, error) {
|
||||
rootPathNamePtr, err := syscall.UTF16PtrFromString(rootPathName)
|
||||
if err != nil {
|
||||
return DRIVE_UNKNOWN, errors.Wrapf(err, "UTF16PtrFromString failed for rootPathName=%v", rootPathName)
|
||||
}
|
||||
|
||||
dt, err := _GetDriveType(rootPathNamePtr)
|
||||
if err != nil {
|
||||
return DRIVE_UNKNOWN, errors.Wrapf(err, "GetDriveType failed for rootPathName=%v", rootPathName)
|
||||
}
|
||||
|
||||
return dt, nil
|
||||
}
|
||||
|
||||
// EnumProcesses retrieves the process identifier for each process object in the
|
||||
// system. This function can return a max of 65536 PIDs. If there are more
|
||||
// processes than that then this will not return them all.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms682629(v=vs.85).aspx
|
||||
func EnumProcesses() ([]uint32, error) {
|
||||
enumProcesses := func(size int) ([]uint32, error) {
|
||||
var (
|
||||
pids = make([]uint32, size)
|
||||
sizeBytes = len(pids) * sizeofUint32
|
||||
bytesWritten uint32
|
||||
)
|
||||
|
||||
err := _EnumProcesses(&pids[0], uint32(sizeBytes), &bytesWritten)
|
||||
|
||||
pidsWritten := int(bytesWritten) / sizeofUint32
|
||||
if int(bytesWritten)%sizeofUint32 != 0 || pidsWritten > len(pids) {
|
||||
return nil, errors.Errorf("EnumProcesses returned an invalid bytesWritten value of %v", bytesWritten)
|
||||
}
|
||||
pids = pids[:pidsWritten]
|
||||
|
||||
return pids, err
|
||||
}
|
||||
|
||||
// Retry the EnumProcesses call with larger arrays if needed.
|
||||
size := 2048
|
||||
var pids []uint32
|
||||
for tries := 0; tries < 5; tries++ {
|
||||
var err error
|
||||
pids, err = enumProcesses(size)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "EnumProcesses failed")
|
||||
}
|
||||
|
||||
if len(pids) < size {
|
||||
break
|
||||
}
|
||||
|
||||
// Increase the size of the pids array and retry the enumProcesses call
|
||||
// because the array wasn't large enough to hold all of the processes.
|
||||
size *= 2
|
||||
}
|
||||
|
||||
return pids, nil
|
||||
}
|
||||
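// pidCountExample is an illustrative sketch of the retrying wrapper above: it
// fetches all process identifiers and reports how many were found.
func pidCountExample() error {
	pids, err := EnumProcesses()
	if err != nil {
		return err
	}
	fmt.Printf("found %d processes\n", len(pids))
	return nil
}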
|
||||
// GetDiskFreeSpaceEx retrieves information about the amount of space that is
|
||||
// available on a disk volume, which is the total amount of space, the total
|
||||
// amount of free space, and the total amount of free space available to the
|
||||
// user that is associated with the calling thread.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364937(v=vs.85).aspx
|
||||
func GetDiskFreeSpaceEx(directoryName string) (freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes uint64, err error) {
|
||||
directoryNamePtr, err := syscall.UTF16PtrFromString(directoryName)
|
||||
if err != nil {
|
||||
return 0, 0, 0, errors.Wrapf(err, "UTF16PtrFromString failed for directoryName=%v", directoryName)
|
||||
}
|
||||
|
||||
err = _GetDiskFreeSpaceEx(directoryNamePtr, &freeBytesAvailable, &totalNumberOfBytes, &totalNumberOfFreeBytes)
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
return freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes, nil
|
||||
}
|
||||
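// diskSpaceExample is an illustrative sketch of the wrapper above; the `C:\`
// path is only a sample value.
func diskSpaceExample() error {
	freeForCaller, total, free, err := GetDiskFreeSpaceEx(`C:\`)
	if err != nil {
		return err
	}
	fmt.Printf("caller-available=%d total=%d free=%d bytes\n", freeForCaller, total, free)
	return nil
}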
|
||||
// CreateToolhelp32Snapshot takes a snapshot of the specified processes, as well
|
||||
// as the heaps, modules, and threads used by these processes.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms682489(v=vs.85).aspx
|
||||
func CreateToolhelp32Snapshot(flags, pid uint32) (syscall.Handle, error) {
|
||||
h, err := _CreateToolhelp32Snapshot(flags, pid)
|
||||
if err != nil {
|
||||
return syscall.InvalidHandle, err
|
||||
}
|
||||
if h == syscall.InvalidHandle {
|
||||
return syscall.InvalidHandle, syscall.GetLastError()
|
||||
}
|
||||
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// Process32First retrieves information about the first process encountered in a
|
||||
// system snapshot.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684834
|
||||
func Process32First(handle syscall.Handle) (ProcessEntry32, error) {
|
||||
processEntry32 := ProcessEntry32{size: sizeofProcessEntry32}
|
||||
err := _Process32First(handle, &processEntry32)
|
||||
if err != nil {
|
||||
return ProcessEntry32{}, errors.Wrap(err, "Process32First failed")
|
||||
}
|
||||
|
||||
return processEntry32, nil
|
||||
}
|
||||
|
||||
// Process32Next retrieves information about the next process recorded in a
|
||||
// system snapshot. When there are no more processes to iterate then
|
||||
// syscall.ERROR_NO_MORE_FILES is returned (use errors.Cause() to unwrap).
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684836
|
||||
func Process32Next(handle syscall.Handle) (ProcessEntry32, error) {
|
||||
processEntry32 := ProcessEntry32{size: sizeofProcessEntry32}
|
||||
err := _Process32Next(handle, &processEntry32)
|
||||
if err != nil {
|
||||
return ProcessEntry32{}, errors.Wrap(err, "Process32Next failed")
|
||||
}
|
||||
|
||||
return processEntry32, nil
|
||||
}
|
||||
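// walkProcessesExample is an illustrative sketch tying the snapshot helpers
// together: take a process snapshot, walk it with Process32First and
// Process32Next, and treat ERROR_NO_MORE_FILES as the normal end of iteration.
func walkProcessesExample() error {
	h, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
	if err != nil {
		return err
	}
	defer syscall.CloseHandle(h)

	entry, err := Process32First(h)
	for err == nil {
		fmt.Printf("pid=%d exe=%s\n", entry.ProcessID, entry.ExeFile())
		entry, err = Process32Next(h)
	}
	if errors.Cause(err) == syscall.ERROR_NO_MORE_FILES {
		return nil
	}
	return err
}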
|
||||
// UTF16SliceToStringSlice converts a slice of uint16 containing a list of UTF-16
|
||||
// strings to a slice of strings.
|
||||
func UTF16SliceToStringSlice(buffer []uint16) []string {
|
||||
// Split the uint16 slice at null-terminators.
|
||||
var startIdx int
|
||||
var stringsUTF16 [][]uint16
|
||||
for i, value := range buffer {
|
||||
if value == 0 {
|
||||
stringsUTF16 = append(stringsUTF16, buffer[startIdx:i])
|
||||
startIdx = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the utf16 slices to strings.
|
||||
result := make([]string, 0, len(stringsUTF16))
|
||||
for _, stringUTF16 := range stringsUTF16 {
|
||||
if len(stringUTF16) > 0 {
|
||||
result = append(result, syscall.UTF16ToString(stringUTF16))
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
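// utf16SplitExample is an illustrative sketch of the splitting behaviour: two
// consecutive null-terminated UTF-16 strings come back as two Go strings.
func utf16SplitExample() {
	buffer := syscall.StringToUTF16("C:\\")                   // "C:\" plus its null terminator
	buffer = append(buffer, syscall.StringToUTF16("D:\\")...) // "D:\" plus its null terminator
	fmt.Println(UTF16SliceToStringSlice(buffer))              // [C:\ D:\]
}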
|
||||
func GetUserProcessParams(handle syscall.Handle, pbi ProcessBasicInformation) (params RtlUserProcessParameters, err error) {
|
||||
const is32bitProc = unsafe.Sizeof(uintptr(0)) == 4
|
||||
|
||||
// Offset of params field within PEB structure.
|
||||
// This structure differs between 32-bit and 64-bit processes.
|
||||
paramsOffset := 0x20
|
||||
if is32bitProc {
|
||||
paramsOffset = 0x10
|
||||
}
|
||||
|
||||
// Read the PEB from the target process memory
|
||||
pebSize := paramsOffset + 8
|
||||
peb := make([]byte, pebSize)
|
||||
nRead, err := ReadProcessMemory(handle, pbi.PebBaseAddress, peb)
|
||||
if err != nil {
|
||||
return params, err
|
||||
}
|
||||
if nRead != uintptr(pebSize) {
|
||||
return params, errors.Errorf("PEB: short read (%d/%d)", nRead, pebSize)
|
||||
}
|
||||
|
||||
// Get the RTL_USER_PROCESS_PARAMETERS struct pointer from the PEB
|
||||
paramsAddr := *(*uintptr)(unsafe.Pointer(&peb[paramsOffset]))
|
||||
|
||||
// Read the RTL_USER_PROCESS_PARAMETERS from the target process memory
|
||||
paramsBuf := make([]byte, SizeOfRtlUserProcessParameters)
|
||||
nRead, err = ReadProcessMemory(handle, paramsAddr, paramsBuf)
|
||||
if err != nil {
|
||||
return params, err
|
||||
}
|
||||
if nRead != uintptr(SizeOfRtlUserProcessParameters) {
|
||||
return params, errors.Errorf("RTL_USER_PROCESS_PARAMETERS: short read (%d/%d)", nRead, SizeOfRtlUserProcessParameters)
|
||||
}
|
||||
|
||||
params = *(*RtlUserProcessParameters)(unsafe.Pointer(¶msBuf[0]))
|
||||
return params, nil
|
||||
}
|
||||
|
||||
// ReadProcessUnicodeString returns a zero-terminated UTF-16 string from another
|
||||
// process's memory.
|
||||
func ReadProcessUnicodeString(handle syscall.Handle, s *UnicodeString) ([]byte, error) {
|
||||
// Allocate an extra UTF-16 null character at the end in case the read string
|
||||
// is not terminated.
|
||||
extra := 2
|
||||
if s.Size&1 != 0 {
|
||||
extra = 3 // If size is odd, need 3 nulls to terminate.
|
||||
}
|
||||
buf := make([]byte, int(s.Size)+extra)
|
||||
nRead, err := ReadProcessMemory(handle, s.Buffer, buf[:s.Size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if nRead != uintptr(s.Size) {
|
||||
return nil, errors.Errorf("unicode string: short read: (%d/%d)", nRead, s.Size)
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// ByteSliceToStringSlice uses the CommandLineToArgv API to split a UTF-16 command
|
||||
// line string into a list of parameters.
|
||||
func ByteSliceToStringSlice(utf16 []byte) ([]string, error) {
|
||||
n := len(utf16)
|
||||
// Discard odd byte
|
||||
if n&1 != 0 {
|
||||
n--
|
||||
utf16 = utf16[:n]
|
||||
}
|
||||
if n == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
terminated := false
|
||||
for i := 0; i < n && !terminated; i += 2 {
|
||||
terminated = utf16[i] == 0 && utf16[i+1] == 0
|
||||
}
|
||||
if !terminated {
|
||||
// Append a null uint16 at the end if terminator is missing
|
||||
utf16 = append(utf16, 0, 0)
|
||||
}
|
||||
var numArgs int32
|
||||
argsWide, err := syscall.CommandLineToArgv((*uint16)(unsafe.Pointer(&utf16[0])), &numArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Free memory allocated for CommandLineToArgvW arguments.
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(argsWide)))
|
||||
|
||||
args := make([]string, numArgs)
|
||||
for idx := range args {
|
||||
args[idx] = syscall.UTF16ToString(argsWide[idx][:])
|
||||
}
|
||||
return args, nil
|
||||
}
|
||||
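// argvExample is an illustrative sketch of the helper above: encode a command
// line as little-endian UTF-16 bytes and split it into its arguments.
func argvExample() ([]string, error) {
	cmd := syscall.StringToUTF16(`app.exe -flag "a value"`)
	buf := make([]byte, 0, len(cmd)*2)
	for _, v := range cmd {
		buf = append(buf, byte(v), byte(v>>8))
	}
	return ByteSliceToStringSlice(buf) // ["app.exe", "-flag", "a value"]
}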
|
||||
// ReadProcessMemory reads from another process's memory. The handle needs to have
|
||||
// the PROCESS_VM_READ right.
|
||||
// A zero-byte read is a no-op, no error is returned.
|
||||
func ReadProcessMemory(handle syscall.Handle, baseAddress uintptr, dest []byte) (numRead uintptr, err error) {
|
||||
n := len(dest)
|
||||
if n == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if err = _ReadProcessMemory(handle, baseAddress, uintptr(unsafe.Pointer(&dest[0])), uintptr(n), &numRead); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return numRead, nil
|
||||
}
|
||||
|
||||
func GetTickCount64() (uptime uint64, err error) {
|
||||
if uptime, err = _GetTickCount64(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uptime, nil
|
||||
}
|
||||
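// uptimeExample is an illustrative sketch; it assumes the tick count is in
// milliseconds, as documented for the underlying GetTickCount64 API.
func uptimeExample() (time.Duration, error) {
	ms, err := GetTickCount64()
	if err != nil {
		return 0, err
	}
	return time.Duration(ms) * time.Millisecond, nil
}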
|
||||
// Use "GOOS=windows go generate -v -x ." to generate the source.
|
||||
|
||||
// Add -trace to enable debug prints around syscalls.
|
||||
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -systemdll=false -output zsyscall_windows.go syscall_windows.go
|
||||
|
||||
// Windows API calls
|
||||
//sys _GlobalMemoryStatusEx(buffer *MemoryStatusEx) (err error) = kernel32.GlobalMemoryStatusEx
|
||||
//sys _GetLogicalDriveStringsW(bufferLength uint32, buffer *uint16) (length uint32, err error) = kernel32.GetLogicalDriveStringsW
|
||||
//sys _GetProcessMemoryInfo(handle syscall.Handle, psmemCounters *ProcessMemoryCountersEx, cb uint32) (err error) = psapi.GetProcessMemoryInfo
|
||||
//sys _GetProcessImageFileName(handle syscall.Handle, outImageFileName *uint16, size uint32) (length uint32, err error) = psapi.GetProcessImageFileNameW
|
||||
//sys _GetSystemTimes(idleTime *syscall.Filetime, kernelTime *syscall.Filetime, userTime *syscall.Filetime) (err error) = kernel32.GetSystemTimes
|
||||
//sys _GetDriveType(rootPathName *uint16) (dt DriveType, err error) = kernel32.GetDriveTypeW
|
||||
//sys _EnumProcesses(processIds *uint32, sizeBytes uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses
|
||||
//sys _GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailable *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = kernel32.GetDiskFreeSpaceExW
|
||||
//sys _Process32First(handle syscall.Handle, processEntry32 *ProcessEntry32) (err error) = kernel32.Process32FirstW
|
||||
//sys _Process32Next(handle syscall.Handle, processEntry32 *ProcessEntry32) (err error) = kernel32.Process32NextW
|
||||
//sys _CreateToolhelp32Snapshot(flags uint32, processID uint32) (handle syscall.Handle, err error) = kernel32.CreateToolhelp32Snapshot
|
||||
//sys _NtQuerySystemInformation(systemInformationClass uint32, systemInformation *byte, systemInformationLength uint32, returnLength *uint32) (ntstatus uint32, err error) = ntdll.NtQuerySystemInformation
|
||||
//sys _NtQueryInformationProcess(processHandle syscall.Handle, processInformationClass uint32, processInformation *byte, processInformationLength uint32, returnLength *uint32) (ntstatus uint32, err error) = ntdll.NtQueryInformationProcess
|
||||
//sys _LookupPrivilegeName(systemName string, luid *int64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
|
||||
//sys _LookupPrivilegeValue(systemName string, name string, luid *int64) (err error) = advapi32.LookupPrivilegeValueW
|
||||
//sys _AdjustTokenPrivileges(token syscall.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
|
||||
//sys _FindFirstVolume(volumeName *uint16, size uint32) (handle syscall.Handle, err error) = kernel32.FindFirstVolumeW
|
||||
//sys _FindNextVolume(handle syscall.Handle, volumeName *uint16, size uint32) (err error) = kernel32.FindNextVolumeW
|
||||
//sys _FindVolumeClose(handle syscall.Handle) (err error) = kernel32.FindVolumeClose
|
||||
//sys _GetVolumePathNamesForVolumeName(volumeName string, buffer *uint16, bufferSize uint32, length *uint32) (err error) = kernel32.GetVolumePathNamesForVolumeNameW
|
||||
//sys _ReadProcessMemory(handle syscall.Handle, baseAddress uintptr, buffer uintptr, size uintptr, numRead *uintptr) (err error) = kernel32.ReadProcessMemory
|
||||
//sys _GetTickCount64() (uptime uint64, err error) = kernel32.GetTickCount64
|
43
vendor/github.com/elastic/gosigar/sys/windows/version.go
generated
vendored
@ -1,43 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package windows
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Version identifies a Windows version by major, minor, and build number.
|
||||
type Version struct {
|
||||
Major int
|
||||
Minor int
|
||||
Build int
|
||||
}
|
||||
|
||||
// GetWindowsVersion returns the Windows version information. Applications not
|
||||
// manifested for Windows 8.1 or Windows 10 will return the Windows 8 OS version
|
||||
// value (6.2).
|
||||
//
|
||||
// For a table of version numbers see:
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
|
||||
func GetWindowsVersion() Version {
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
|
||||
ver, err := syscall.GetVersion()
|
||||
if err != nil {
|
||||
// GetVersion should never return an error.
|
||||
panic(fmt.Errorf("GetVersion failed: %v", err))
|
||||
}
|
||||
|
||||
return Version{
|
||||
Major: int(ver & 0xFF),
|
||||
Minor: int(ver >> 8 & 0xFF),
|
||||
Build: int(ver >> 16),
|
||||
}
|
||||
}
|
||||
|
||||
// IsWindowsVistaOrGreater returns true if the Windows version is Vista or
|
||||
// greater.
|
||||
func (v Version) IsWindowsVistaOrGreater() bool {
|
||||
// Vista is 6.0.
|
||||
return v.Major >= 6 && v.Minor >= 0
|
||||
}
|
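// versionExample is an illustrative sketch of the two helpers above.
func versionExample() {
	v := GetWindowsVersion()
	fmt.Printf("Windows %d.%d build %d (Vista or newer: %v)\n",
		v.Major, v.Minor, v.Build, v.IsWindowsVistaOrGreater())
}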
376
vendor/github.com/elastic/gosigar/sys/windows/zsyscall_windows.go
generated
vendored
@ -1,376 +0,0 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package windows
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values seen on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
modpsapi = syscall.NewLazyDLL("psapi.dll")
|
||||
modntdll = syscall.NewLazyDLL("ntdll.dll")
|
||||
modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
|
||||
|
||||
procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
|
||||
procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW")
|
||||
procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
|
||||
procGetProcessImageFileNameW = modpsapi.NewProc("GetProcessImageFileNameW")
|
||||
procGetSystemTimes = modkernel32.NewProc("GetSystemTimes")
|
||||
procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW")
|
||||
procEnumProcesses = modpsapi.NewProc("EnumProcesses")
|
||||
procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW")
|
||||
procProcess32FirstW = modkernel32.NewProc("Process32FirstW")
|
||||
procProcess32NextW = modkernel32.NewProc("Process32NextW")
|
||||
procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot")
|
||||
procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
|
||||
procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess")
|
||||
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
|
||||
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
|
||||
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
|
||||
procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW")
|
||||
procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW")
|
||||
procFindVolumeClose = modkernel32.NewProc("FindVolumeClose")
|
||||
procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW")
|
||||
procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory")
|
||||
procGetTickCount64 = modkernel32.NewProc("GetTickCount64")
|
||||
)
|
||||
|
||||
func _GlobalMemoryStatusEx(buffer *MemoryStatusEx) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procGlobalMemoryStatusEx.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _GetLogicalDriveStringsW(bufferLength uint32, buffer *uint16) (length uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0)
|
||||
length = uint32(r0)
|
||||
if length == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _GetProcessMemoryInfo(handle syscall.Handle, psmemCounters *ProcessMemoryCountersEx, cb uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(psmemCounters)), uintptr(cb))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _GetProcessImageFileName(handle syscall.Handle, outImageFileName *uint16, size uint32) (length uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(outImageFileName)), uintptr(size))
|
||||
length = uint32(r0)
|
||||
if length == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _GetSystemTimes(idleTime *syscall.Filetime, kernelTime *syscall.Filetime, userTime *syscall.Filetime) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procGetSystemTimes.Addr(), 3, uintptr(unsafe.Pointer(idleTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _GetDriveType(rootPathName *uint16) (dt DriveType, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0)
|
||||
dt = DriveType(r0)
|
||||
if dt == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _EnumProcesses(processIds *uint32, sizeBytes uint32, bytesReturned *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(sizeBytes), uintptr(unsafe.Pointer(bytesReturned)))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailable *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailable)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _Process32First(handle syscall.Handle, processEntry32 *ProcessEntry32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(processEntry32)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _Process32Next(handle syscall.Handle, processEntry32 *ProcessEntry32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(processEntry32)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _CreateToolhelp32Snapshot(flags uint32, processID uint32) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processID), 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _NtQuerySystemInformation(systemInformationClass uint32, systemInformation *byte, systemInformationLength uint32, returnLength *uint32) (ntstatus uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInformationClass), uintptr(unsafe.Pointer(systemInformation)), uintptr(systemInformationLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
|
||||
ntstatus = uint32(r0)
|
||||
if ntstatus == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _NtQueryInformationProcess(processHandle syscall.Handle, processInformationClass uint32, processInformation *byte, processInformationLength uint32, returnLength *uint32) (ntstatus uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInformationClass), uintptr(unsafe.Pointer(processInformation)), uintptr(processInformationLength), uintptr(unsafe.Pointer(returnLength)), 0)
|
||||
ntstatus = uint32(r0)
|
||||
if ntstatus == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _LookupPrivilegeName(systemName string, luid *int64, buffer *uint16, size *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return __LookupPrivilegeName(_p0, luid, buffer, size)
|
||||
}
|
||||
|
||||
func __LookupPrivilegeName(systemName *uint16, luid *int64, buffer *uint16, size *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _LookupPrivilegeValue(systemName string, name string, luid *int64) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(systemName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var _p1 *uint16
|
||||
_p1, err = syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return __LookupPrivilegeValue(_p0, _p1, luid)
|
||||
}
|
||||
|
||||
func __LookupPrivilegeValue(systemName *uint16, name *uint16, luid *int64) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _AdjustTokenPrivileges(token syscall.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
|
||||
var _p0 uint32
|
||||
if releaseAll {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
|
||||
success = r0 != 0
|
||||
if true {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _FindFirstVolume(volumeName *uint16, size uint32) (handle syscall.Handle, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(size), 0)
|
||||
handle = syscall.Handle(r0)
|
||||
if handle == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _FindNextVolume(handle syscall.Handle, volumeName *uint16, size uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(volumeName)), uintptr(size))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _FindVolumeClose(handle syscall.Handle) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(handle), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _GetVolumePathNamesForVolumeName(volumeName string, buffer *uint16, bufferSize uint32, length *uint32) (err error) {
|
||||
var _p0 *uint16
|
||||
_p0, err = syscall.UTF16PtrFromString(volumeName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return __GetVolumePathNamesForVolumeName(_p0, buffer, bufferSize, length)
|
||||
}
|
||||
|
||||
func __GetVolumePathNamesForVolumeName(volumeName *uint16, buffer *uint16, bufferSize uint32, length *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferSize), uintptr(unsafe.Pointer(length)), 0, 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _ReadProcessMemory(handle syscall.Handle, baseAddress uintptr, buffer uintptr, size uintptr, numRead *uintptr) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(handle), uintptr(baseAddress), uintptr(buffer), uintptr(size), uintptr(unsafe.Pointer(numRead)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func _GetTickCount64() (uptime uint64, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
|
||||
uptime = uint64(r0)
|
||||
if uptime == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
1
vendor/github.com/ethereum/go-ethereum/.gitignore
generated
vendored
@ -24,6 +24,7 @@ build/_vendor/pkg
|
||||
|
||||
# used by the Makefile
|
||||
/build/_workspace/
|
||||
/build/cache/
|
||||
/build/bin/
|
||||
/geth*.zip
|
||||
|
||||
|
50
vendor/github.com/ethereum/go-ethereum/.golangci.yml
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
# This file configures github.com/golangci/golangci-lint.
|
||||
|
||||
run:
|
||||
timeout: 3m
|
||||
tests: true
|
||||
# default is true. Enables skipping of directories:
|
||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
skip-dirs-use-default: true
|
||||
skip-files:
|
||||
- core/genesis_alloc.go
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- deadcode
|
||||
- goconst
|
||||
- goimports
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
# - staticcheck
|
||||
- unconvert
|
||||
# - unused
|
||||
- varcheck
|
||||
|
||||
linters-settings:
|
||||
gofmt:
|
||||
simplify: true
|
||||
goconst:
|
||||
min-len: 3 # minimum length of string constant
|
||||
min-occurrences: 6 # minimum number of occurrences
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- path: crypto/blake2b/
|
||||
linters:
|
||||
- deadcode
|
||||
- path: crypto/bn256/cloudflare
|
||||
linters:
|
||||
- deadcode
|
||||
- path: p2p/discv5/
|
||||
linters:
|
||||
- deadcode
|
||||
- path: core/vm/instructions_test.go
|
||||
linters:
|
||||
- goconst
|
||||
- path: cmd/faucet/
|
||||
linters:
|
||||
- deadcode
|
202
vendor/github.com/ethereum/go-ethereum/.travis.yml
generated
vendored
@ -2,12 +2,21 @@ language: go
|
||||
go_import_path: github.com/ethereum/go-ethereum
|
||||
sudo: false
|
||||
jobs:
|
||||
allow_failures:
|
||||
- stage: build
|
||||
os: osx
|
||||
go: 1.15.x
|
||||
env:
|
||||
- azure-osx
|
||||
- azure-ios
|
||||
- cocoapods-ios
|
||||
|
||||
include:
|
||||
# This builder only tests code linters on latest version of Go
|
||||
# This builder only tests code linters on latest version of Go
|
||||
- stage: lint
|
||||
os: linux
|
||||
dist: xenial
|
||||
go: 1.12.x
|
||||
dist: bionic
|
||||
go: 1.16.x
|
||||
env:
|
||||
- lint
|
||||
git:
|
||||
@ -15,55 +24,51 @@ jobs:
|
||||
script:
|
||||
- go run build/ci.go lint
|
||||
|
||||
# These builders create the Docker sub-images for multi-arch push and each
|
||||
# will attempt to push the multi-arch image if they are the last builder
|
||||
- stage: build
|
||||
if: type = push
|
||||
os: linux
|
||||
dist: xenial
|
||||
go: 1.11.x
|
||||
arch: amd64
|
||||
dist: bionic
|
||||
go: 1.16.x
|
||||
env:
|
||||
- docker
|
||||
services:
|
||||
- docker
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
before_install:
|
||||
- export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
script:
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
- go run build/ci.go docker -image -manifest amd64,arm64 -upload karalabe/geth-docker-test
|
||||
|
||||
- stage: build
|
||||
if: type = push
|
||||
os: linux
|
||||
dist: xenial
|
||||
go: 1.12.x
|
||||
arch: arm64
|
||||
dist: bionic
|
||||
go: 1.16.x
|
||||
env:
|
||||
- docker
|
||||
services:
|
||||
- docker
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
before_install:
|
||||
- export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
script:
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
# These are the latest Go versions.
|
||||
- stage: build
|
||||
os: linux
|
||||
dist: xenial
|
||||
go: 1.13.x
|
||||
script:
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
- stage: build
|
||||
os: osx
|
||||
go: 1.13.x
|
||||
script:
|
||||
- echo "Increase the maximum number of open file descriptors on macOS"
|
||||
- NOFILE=20480
|
||||
- sudo sysctl -w kern.maxfiles=$NOFILE
|
||||
- sudo sysctl -w kern.maxfilesperproc=$NOFILE
|
||||
- sudo launchctl limit maxfiles $NOFILE $NOFILE
|
||||
- sudo launchctl limit maxfiles
|
||||
- ulimit -S -n $NOFILE
|
||||
- ulimit -n
|
||||
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
- go run build/ci.go docker -image -manifest amd64,arm64 -upload karalabe/geth-docker-test
|
||||
|
||||
# This builder does the Ubuntu PPA upload
|
||||
- stage: build
|
||||
if: type = push
|
||||
os: linux
|
||||
dist: xenial
|
||||
go: 1.13.x
|
||||
dist: bionic
|
||||
go: 1.16.x
|
||||
env:
|
||||
- ubuntu-ppa
|
||||
- GO111MODULE=on
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
addons:
|
||||
@ -83,11 +88,12 @@ jobs:
|
||||
- stage: build
|
||||
if: type = push
|
||||
os: linux
|
||||
dist: xenial
|
||||
dist: bionic
|
||||
sudo: required
|
||||
go: 1.13.x
|
||||
go: 1.16.x
|
||||
env:
|
||||
- azure-linux
|
||||
- GO111MODULE=on
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
addons:
|
||||
@ -96,105 +102,107 @@ jobs:
|
||||
- gcc-multilib
|
||||
script:
|
||||
# Build for the primary platforms that Trusty can manage
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- go run build/ci.go install -arch 386
|
||||
- go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- go run build/ci.go install -dlgo
|
||||
- go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
- go run build/ci.go install -dlgo -arch 386
|
||||
- go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
|
||||
# Switch over GCC to cross compilation (breaks 386, hence why do it here only)
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
|
||||
- sudo ln -s /usr/include/asm-generic /usr/include/asm
|
||||
|
||||
- GOARM=5 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
|
||||
- GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- GOARM=6 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
|
||||
- GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- GOARM=7 go run build/ci.go install -arch arm -cc arm-linux-gnueabihf-gcc
|
||||
- GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- go run build/ci.go install -arch arm64 -cc aarch64-linux-gnu-gcc
|
||||
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- GOARM=5 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc
|
||||
- GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
- GOARM=6 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc
|
||||
- GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
- GOARM=7 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabihf-gcc
|
||||
- GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
- go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
|
||||
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
|
||||
# This builder does the Linux Azure MIPS xgo uploads
|
||||
- stage: build
|
||||
if: type = push
|
||||
os: linux
|
||||
dist: xenial
|
||||
dist: bionic
|
||||
services:
|
||||
- docker
|
||||
go: 1.13.x
|
||||
go: 1.16.x
|
||||
env:
|
||||
- azure-linux-mips
|
||||
- GO111MODULE=on
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
|
||||
- for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
|
||||
- go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
|
||||
- go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v
|
||||
- for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done
|
||||
- go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
|
||||
- go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v
|
||||
- for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done
|
||||
- go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
|
||||
- go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v
|
||||
- for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done
|
||||
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
|
||||
# This builder does the Android Maven and Azure uploads
|
||||
- stage: build
|
||||
if: type = push
|
||||
os: linux
|
||||
dist: xenial
|
||||
dist: bionic
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- oracle-java8-installer
|
||||
- oracle-java8-set-default
|
||||
language: android
|
||||
android:
|
||||
components:
|
||||
- platform-tools
|
||||
- tools
|
||||
- android-15
|
||||
- android-19
|
||||
- android-24
|
||||
- openjdk-8-jdk
|
||||
env:
|
||||
- azure-android
|
||||
- maven-android
|
||||
- GO111MODULE=on
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
before_install:
|
||||
- curl https://dl.google.com/go/go1.13.linux-amd64.tar.gz | tar -xz
|
||||
# Install Android and its dependencies manually, Travis is stale
|
||||
- export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
|
||||
- curl https://dl.google.com/android/repository/commandlinetools-linux-6858069_latest.zip -o android.zip
|
||||
- unzip -q android.zip -d $HOME/sdk && rm android.zip
|
||||
- mv $HOME/sdk/cmdline-tools $HOME/sdk/latest && mkdir $HOME/sdk/cmdline-tools && mv $HOME/sdk/latest $HOME/sdk/cmdline-tools
|
||||
- export PATH=$PATH:$HOME/sdk/cmdline-tools/latest/bin
|
||||
- export ANDROID_HOME=$HOME/sdk
|
||||
|
||||
- yes | sdkmanager --licenses >/dev/null
|
||||
- sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle"
|
||||
|
||||
# Install Go to allow building with
|
||||
- curl https://dl.google.com/go/go1.16.linux-amd64.tar.gz | tar -xz
|
||||
- export PATH=`pwd`/go/bin:$PATH
|
||||
- export GOROOT=`pwd`/go
|
||||
- export GOPATH=$HOME/go
|
||||
script:
|
||||
# Build the Android archive and upload it to Maven Central and Azure
|
||||
- curl https://dl.google.com/android/repository/android-ndk-r19b-linux-x86_64.zip -o android-ndk-r19b.zip
|
||||
- unzip -q android-ndk-r19b.zip && rm android-ndk-r19b.zip
|
||||
- mv android-ndk-r19b $ANDROID_HOME/ndk-bundle
|
||||
|
||||
- mkdir -p $GOPATH/src/github.com/ethereum
|
||||
- ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum
|
||||
- go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds
|
||||
- go run build/ci.go aar -signer ANDROID_SIGNING_KEY -signify SIGNIFY_KEY -deploy https://oss.sonatype.org -upload gethstore/builds
|
||||
|
||||
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
|
||||
- stage: build
|
||||
if: type = push
|
||||
os: osx
|
||||
go: 1.13.x
|
||||
go: 1.16.x
|
||||
env:
|
||||
- azure-osx
|
||||
- azure-ios
|
||||
- cocoapods-ios
|
||||
- GO111MODULE=on
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds
|
||||
- go run build/ci.go install -dlgo
|
||||
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
|
||||
|
||||
# Build the iOS framework and upload it to CocoaPods and Azure
|
||||
- gem uninstall cocoapods -a -x
|
||||
@ -209,16 +217,48 @@ jobs:
|
||||
|
||||
# Workaround for https://github.com/golang/go/issues/23749
|
||||
- export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc'
|
||||
- go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds
|
||||
- go run build/ci.go xcode -signer IOS_SIGNING_KEY -signify SIGNIFY_KEY -deploy trunk -upload gethstore/builds
|
||||
|
||||
# These builders run the tests
|
||||
- stage: build
|
||||
os: linux
|
||||
arch: amd64
|
||||
dist: bionic
|
||||
go: 1.16.x
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
script:
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
- stage: build
|
||||
if: type = pull_request
|
||||
os: linux
|
||||
arch: arm64
|
||||
dist: bionic
|
||||
go: 1.16.x
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
script:
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
- stage: build
|
||||
os: linux
|
||||
dist: bionic
|
||||
go: 1.15.x
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
script:
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
# This builder does the Azure archive purges to avoid accumulating junk
|
||||
- stage: build
|
||||
if: type = cron
|
||||
os: linux
|
||||
dist: xenial
|
||||
go: 1.13.x
|
||||
dist: bionic
|
||||
go: 1.16.x
|
||||
env:
|
||||
- azure-purge
|
||||
- GO111MODULE=on
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
|
59
vendor/github.com/ethereum/go-ethereum/COPYING
generated
vendored
@ -1,7 +1,7 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2014 The go-ethereum Authors.
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
@ -616,4 +616,59 @@ above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
||||
|
16
vendor/github.com/ethereum/go-ethereum/Dockerfile
generated
vendored
@ -1,5 +1,10 @@
|
||||
# Support setting various labels on the final image
|
||||
ARG COMMIT=""
|
||||
ARG VERSION=""
|
||||
ARG BUILDNUM=""
|
||||
|
||||
# Build Geth in a stock Go builder container
|
||||
FROM golang:1.13-alpine as builder
|
||||
FROM golang:1.16-alpine as builder
|
||||
|
||||
RUN apk add --no-cache make gcc musl-dev linux-headers git
|
||||
|
||||
@ -12,5 +17,12 @@ FROM alpine:latest
|
||||
RUN apk add --no-cache ca-certificates
|
||||
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
|
||||
|
||||
EXPOSE 8545 8546 8547 30303 30303/udp
|
||||
EXPOSE 8545 8546 30303 30303/udp
|
||||
ENTRYPOINT ["geth"]
|
||||
|
||||
# Add some metadata labels to help programmatic image consumption
|
||||
ARG COMMIT=""
|
||||
ARG VERSION=""
|
||||
ARG BUILDNUM=""
|
||||
|
||||
LABEL commit="$COMMIT" version="$VERSION" buildnum="$BUILDNUM"
|
||||
|
16
vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools
generated
vendored
@ -1,5 +1,10 @@
|
||||
# Support setting various labels on the final image
|
||||
ARG COMMIT=""
|
||||
ARG VERSION=""
|
||||
ARG BUILDNUM=""
|
||||
|
||||
# Build Geth in a stock Go builder container
|
||||
FROM golang:1.13-alpine as builder
|
||||
FROM golang:1.16-alpine as builder
|
||||
|
||||
RUN apk add --no-cache make gcc musl-dev linux-headers git
|
||||
|
||||
@ -12,4 +17,11 @@ FROM alpine:latest
|
||||
RUN apk add --no-cache ca-certificates
|
||||
COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/
|
||||
|
||||
EXPOSE 8545 8546 8547 30303 30303/udp
|
||||
EXPOSE 8545 8546 30303 30303/udp
|
||||
|
||||
# Add some metadata labels to help programmatic image consumption
|
||||
ARG COMMIT=""
|
||||
ARG VERSION=""
|
||||
ARG BUILDNUM=""
|
||||
|
||||
LABEL commit="$COMMIT" version="$VERSION" buildnum="$BUILDNUM"
|
||||
|
54
vendor/github.com/ethereum/go-ethereum/Makefile
generated
vendored
@@ -10,45 +10,47 @@

GOBIN = ./build/bin
GO ?= latest
GORUN = env GO111MODULE=on go run

geth:
build/env.sh go run build/ci.go install ./cmd/geth
$(GORUN) build/ci.go install ./cmd/geth
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."

all:
build/env.sh go run build/ci.go install
$(GORUN) build/ci.go install

android:
build/env.sh go run build/ci.go aar --local
$(GORUN) build/ci.go aar --local
@echo "Done building."
@echo "Import \"$(GOBIN)/geth.aar\" to use the library."
@echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs"
@echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc"

ios:
build/env.sh go run build/ci.go xcode --local
$(GORUN) build/ci.go xcode --local
@echo "Done building."
@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."

test: all
build/env.sh go run build/ci.go test
$(GORUN) build/ci.go test

lint: ## Run linters.
build/env.sh go run build/ci.go lint
$(GORUN) build/ci.go lint

clean:
./build/clean_go_build_cache.sh
env GO111MODULE=on go clean -cache
rm -fr build/_workspace/pkg/ $(GOBIN)/*

# The devtools target installs tools required for 'go generate'.
# You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.

devtools:
env GOBIN= go get -u golang.org/x/tools/cmd/stringer
env GOBIN= go get -u github.com/kevinburke/go-bindata/go-bindata
env GOBIN= go get -u github.com/fjl/gencodec
env GOBIN= go get -u github.com/golang/protobuf/protoc-gen-go
env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
env GOBIN= go install github.com/kevinburke/go-bindata/go-bindata@latest
env GOBIN= go install github.com/fjl/gencodec@latest
env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest
env GOBIN= go install ./cmd/abigen
@type "npm" 2> /dev/null || echo 'Please install node.js and npm'
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'

@@ -63,12 +65,12 @@ geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 get
@ls -ld $(GOBIN)/geth-linux-*

geth-linux-386:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
@echo "Linux 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep 386

geth-linux-amd64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
@echo "Linux amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep amd64

@@ -77,42 +79,42 @@ geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-ar
@ls -ld $(GOBIN)/geth-linux-* | grep arm

geth-linux-arm-5:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
@echo "Linux ARMv5 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-5

geth-linux-arm-6:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
@echo "Linux ARMv6 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-6

geth-linux-arm-7:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
@echo "Linux ARMv7 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-7

geth-linux-arm64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
@echo "Linux ARM64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm64

geth-linux-mips:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips

geth-linux-mipsle:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPSle cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mipsle

geth-linux-mips64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64

geth-linux-mips64le:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64le cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64le

@@ -121,12 +123,12 @@ geth-darwin: geth-darwin-386 geth-darwin-amd64
@ls -ld $(GOBIN)/geth-darwin-*

geth-darwin-386:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
@echo "Darwin 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep 386

geth-darwin-amd64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
@echo "Darwin amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep amd64

@@ -135,11 +137,11 @@ geth-windows: geth-windows-386 geth-windows-amd64
@ls -ld $(GOBIN)/geth-windows-*

geth-windows-386:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
@echo "Windows 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep 386

geth-windows-amd64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
@echo "Windows amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep amd64
112 vendor/github.com/ethereum/go-ethereum/README.md generated vendored
@@ -4,9 +4,9 @@ Official Golang implementation of the Ethereum protocol.

[](https://godoc.org/github.com/ethereum/go-ethereum)
)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
[](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[](https://travis-ci.org/ethereum/go-ethereum)
[](https://travis-ci.com/ethereum/go-ethereum)
[](https://discord.gg/nthXNEv)

Automated builds are available for stable releases and the unstable master branch. Binary
@@ -14,9 +14,9 @@ archives are published at https://geth.ethereum.org/downloads/.

## Building the source

For prerequisites and detailed build instructions please read the [Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum) on the wiki.
For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth).

Building `geth` requires both a Go (version 1.10 or later) and a C compiler. You can install
Building `geth` requires both a Go (version 1.14 or later) and a C compiler. You can install
them using your favourite package manager. Once the dependencies are installed, run

```shell
@@ -36,18 +36,19 @@ directory.

| Command | Description |
| :-----------: | ----------- |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. |
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |

## Running `geth`

Going through all the possible command line flags is out of scope here (please consult our
[CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)),
[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)),
but we've enumerated a few common parameter combos to get you up to speed quickly
on how you can run your own `geth` instance.

@@ -66,13 +67,14 @@ This command will:
* Start `geth` in fast sync mode (default, can be changed with the `--syncmode` flag),
causing it to download more data in exchange for avoiding processing the entire history
of the Ethereum network, which is very CPU intensive.
* Start up `geth`'s built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as `geth`'s own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
* Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/en/)
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
This tool is optional and if you leave it out you can always attach to an already running
`geth` instance with `geth attach`.

### A Full node on the Ethereum test network
### A Full node on the Görli test network

Transitioning towards developers, if you'd like to play around with creating Ethereum
contracts, you almost certainly would like to do that without any real money involved until
@@ -81,23 +83,24 @@ network, you want to join the **test** network with your node, which is fully eq
the main network, but with play-Ether only.

```shell
$ geth --testnet console
$ geth --goerli console
```

The `console` subcommand has the exact same meaning as above and they are equally
useful on the testnet too. Please see above for their explanations if you've skipped here.
useful on the testnet too. Please, see above for their explanations if you've skipped here.

Specifying the `--testnet` flag, however, will reconfigure your `geth` instance a bit:
Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit:

* Instead of connecting the main Ethereum network, the client will connect to the Görli
test network, which uses different P2P bootnodes, different network IDs and genesis
states.
* Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
will nest itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on
will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on
Linux). Note, on OSX and Linux this also means that attaching to a running testnet node
requires the use of a custom endpoint since `geth attach` will try to attach to a
production node endpoint by default. E.g.
`geth attach <datadir>/testnet/geth.ipc`. Windows users are not affected by
production node endpoint by default, e.g.,
`geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by
this.
* Instead of connecting the main Ethereum network, the client will connect to the test
network, which uses different P2P bootnodes, different network IDs and genesis states.

*Note: Although there are some internal protective measures to prevent transactions from
crossing over between the main network and test network, you should make sure to always
@@ -107,17 +110,26 @@ accounts available between them.*

### Full node on the Rinkeby test network

The above test network is a cross-client one based on the ethash proof-of-work consensus
algorithm. As such, it has certain extra overhead and is more susceptible to reorganization
attacks due to the network's low difficulty/security. Go Ethereum also supports connecting
to a proof-of-authority based test network called [*Rinkeby*](https://www.rinkeby.io)
(operated by members of the community). This network is lighter, more secure, but is only
supported by go-ethereum.
Go Ethereum also supports connecting to the older proof-of-authority based test network
called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the community.

```shell
$ geth --rinkeby console
```

### Full node on the Ropsten test network

In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The
Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such,
it has certain extra overhead and is more susceptible to reorganization attacks due to the
network's low difficulty/security.

```shell
$ geth --ropsten console
```

*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.*

### Configuration

As an alternative to passing the numerous flags to the `geth` binary, you can also pass a
@@ -152,7 +164,7 @@ above command does. It will also create a persistent volume in your home direct
saving your blockchain as well as map the default ports. There is also an `alpine` tag
available for a slim version of the image.

Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers
Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not
accessible from the outside.

@@ -160,8 +172,8 @@ accessible from the outside.

As a developer, sooner rather than later you'll want to start interacting with `geth` and the
Ethereum network via your own programs and not manually through the console. To aid
this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://github.com/ethereum/wiki/wiki/JSON-RPC)
and [`geth` specific APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs)).
this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://eth.wiki/json-rpc/API)
and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)).
These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based
platforms, and named pipes on Windows).

@@ -172,16 +184,16 @@ you'd expect.

HTTP based JSON-RPC API options:

* `--rpc` Enable the HTTP-RPC server
* `--rpcaddr` HTTP-RPC server listening interface (default: `localhost`)
* `--rpcport` HTTP-RPC server listening port (default: `8545`)
* `--rpcapi` API's offered over the HTTP-RPC interface (default: `eth,net,web3`)
* `--rpccorsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
* `--http` Enable the HTTP-RPC server
* `--http.addr` HTTP-RPC server listening interface (default: `localhost`)
* `--http.port` HTTP-RPC server listening port (default: `8545`)
* `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`)
* `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
* `--ws` Enable the WS-RPC server
* `--wsaddr` WS-RPC server listening interface (default: `localhost`)
* `--wsport` WS-RPC server listening port (default: `8546`)
* `--wsapi` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--wsorigins` Origins from which to accept websockets requests
* `--ws.addr` WS-RPC server listening interface (default: `localhost`)
* `--ws.port` WS-RPC server listening port (default: `8546`)
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept websockets requests
* `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
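
(Editorial aside, not part of the vendored README: as a quick illustration of consuming the HTTP endpoint described above from Go, here is a minimal sketch. It assumes a local node started with `geth --http` on the default port 8545 and uses go-ethereum's `ethclient` package.)

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Assumes a node started with `geth --http` listening on the default port 8545.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Fetch the latest header as a basic connectivity check.
	header, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest block:", header.Number)
}
```

The same client works against the WebSocket or IPC endpoints by passing a `ws://` URL or an IPC socket path to `Dial`.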
@@ -217,7 +229,9 @@ aware of and agree upon. This consists of a small JSON file (e.g. call it `genes
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0
},
"alloc": {},
"coinbase": "0x0000000000000000000000000000000000000000",
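
(Editorial aside: the two fork fields added to the example genesis above, `istanbulBlock` and `berlinBlock`, map onto go-ethereum's `params.ChainConfig`. The sketch below is an illustration under assumptions: the field names are taken from the `params` package of the go-ethereum version being vendored, and the chain ID is a made-up private-network value.)

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Mirrors the "config" block of the example genesis, with Istanbul and
	// Berlin enabled from block zero as in the updated README snippet.
	config := &params.ChainConfig{
		ChainID:             big.NewInt(1337), // made-up private-network chain ID
		HomesteadBlock:      big.NewInt(0),
		EIP150Block:         big.NewInt(0),
		EIP155Block:         big.NewInt(0),
		EIP158Block:         big.NewInt(0),
		ByzantiumBlock:      big.NewInt(0),
		ConstantinopleBlock: big.NewInt(0),
		PetersburgBlock:     big.NewInt(0),
		IstanbulBlock:       big.NewInt(0),
		BerlinBlock:         big.NewInt(0),
	}
	fmt.Println(config)
}
```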
@@ -266,7 +280,7 @@ $ bootnode --genkey=boot.key
$ bootnode --nodekey=boot.key
```

With the bootnode online, it will display an [`enode` URL](https://github.com/ethereum/wiki/wiki/enode-url-format)
With the bootnode online, it will display an [`enode` URL](https://eth.wiki/en/fundamentals/enode-url-format)
that other nodes can use to connect to it and exchange peer information. Make sure to
replace the displayed IP address information (most probably `[::]`) with your externally
accessible IP to get the actual `enode` URL.
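
(Editorial aside: go-ethereum also exposes a parser for such `enode` URLs in its `p2p/enode` package. A minimal sketch, taking the URL printed by the bootnode as a command-line argument so that no node ID needs to be invented here.)

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// Usage: go run parse_enode.go "enode://<hex node id>@<ip>:<port>"
func main() {
	if len(os.Args) != 2 {
		log.Fatal("expected exactly one enode URL argument")
	}
	node, err := enode.ParseV4(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	// Print the pieces other peers need in order to reach the node.
	fmt.Println("id:", node.ID(), "ip:", node.IP(), "tcp:", node.TCP(), "udp:", node.UDP())
}
```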
@@ -294,7 +308,7 @@ also need to configure a miner to process transactions and create new blocks for
Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
and the [Genoil miner](https://github.com/Genoil/cpp-ethereum) repository.
and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.

In a private network setting, however a single CPU miner instance is more than enough for
practical purposes as it can produce a stable stream of blocks at the correct intervals
@@ -303,13 +317,13 @@ ones either). To start a `geth` instance for mining, run it with all your usual
by:

```shell
$ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000
$ geth <usual-flags> --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000
```

Which will start mining blocks and transactions on a single CPU thread, crediting all
proceedings to the account specified by `--etherbase`. You can further tune the mining
by changing the default gas limit blocks converge to (`--targetgaslimit`) and the price
transactions are accepted at (`--gasprice`).
proceedings to the account specified by `--miner.etherbase`. You can further tune the mining
by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price
transactions are accepted at (`--miner.gasprice`).

## Contribution

@@ -318,7 +332,7 @@ from anyone on the internet, and are grateful for even the smallest of fixes!

If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
for the maintainers to review and merge into the main code base. If you wish to submit
more complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum)
more complex changes though, please check up with the core devs first on [our Discord Server](https://discord.gg/invite/nthXNEv)
to ensure those changes are in line with the general philosophy of the project and/or get
some early feedback which can make both your efforts much lighter as well as our review
and merge procedures quick and simple.
@@ -333,7 +347,7 @@ Please make sure your contributions adhere to our coding guidelines:
* Commit messages should be prefixed with the package(s) they modify.
* E.g. "eth, rpc: make trace configs optional"

Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide)
for more details on configuring your environment, managing project dependencies, and
testing procedures.
10 vendor/github.com/ethereum/go-ethereum/SECURITY.md generated vendored
@@ -2,31 +2,29 @@

## Supported Versions

Please see Releases. We recommend to use the most recent released version.
Please see [Releases](https://github.com/ethereum/go-ethereum/releases). We recommend using the [most recently released version](https://github.com/ethereum/go-ethereum/releases/latest).

## Audit reports

Audit reports are published in the `docs` folder: https://github.com/ethereum/go-ethereum/tree/master/docs/audits

| Scope | Date | Report Link |
| ------- | ------- | ----------- |
| `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) |
| `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) |

## Reporting a Vulnerability

**Please do not file a public ticket** mentioning the vulnerability.

To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org.
To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities.

Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.

The following key may be used to communicate sensitive information to developers.

Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`

```
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
215 vendor/github.com/ethereum/go-ethereum/accounts/abi/abi.go generated vendored
@@ -19,10 +19,12 @@ package abi
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)

// The ABI holds information about a contract's context and available
@@ -32,6 +34,12 @@ type ABI struct {
Constructor Method
Methods map[string]Method
Events map[string]Event

// Additional "special" functions introduced in solidity v0.6.0.
// It's separated from the original default fallback. Each contract
// can only define one fallback and receive function.
Fallback Method // Note it's also used to represent legacy fallback before v0.6.0
Receive Method
}

// JSON returns a parsed ABI interface and error if it failed.
@@ -42,7 +50,6 @@ func JSON(reader io.Reader) (ABI, error) {
if err := dec.Decode(&abi); err != nil {
return ABI{}, err
}

return abi, nil
}

@@ -70,56 +77,80 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return nil, err
}
// Pack up the method ID too if not a constructor and return
return append(method.ID(), arguments...), nil
return append(method.ID, arguments...), nil
}

// Unpack output in v according to the abi specification
func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) {
if len(data) == 0 {
return fmt.Errorf("abi: unmarshalling empty output")
}
func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
// since there can't be naming collisions with contracts and events,
// we need to decide whether we're calling a method or an event
var args Arguments
if method, ok := abi.Methods[name]; ok {
if len(data)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data)
return nil, fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data)
}
return method.Outputs.Unpack(v, data)
args = method.Outputs
}
if event, ok := abi.Events[name]; ok {
return event.Inputs.Unpack(v, data)
args = event.Inputs
}
return fmt.Errorf("abi: could not locate named method or event")
if args == nil {
return nil, errors.New("abi: could not locate named method or event")
}
return args, nil
}

// UnpackIntoMap unpacks a log into the provided map[string]interface{}
// Unpack unpacks the output according to the abi specification.
func (abi ABI) Unpack(name string, data []byte) ([]interface{}, error) {
args, err := abi.getArguments(name, data)
if err != nil {
return nil, err
}
return args.Unpack(data)
}

// UnpackIntoInterface unpacks the output in v according to the abi specification.
// It performs an additional copy. Please only use, if you want to unpack into a
// structure that does not strictly conform to the abi structure (e.g. has additional arguments)
func (abi ABI) UnpackIntoInterface(v interface{}, name string, data []byte) error {
args, err := abi.getArguments(name, data)
if err != nil {
return err
}
unpacked, err := args.Unpack(data)
if err != nil {
return err
}
return args.Copy(v, unpacked)
}

// UnpackIntoMap unpacks a log into the provided map[string]interface{}.
func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) {
if len(data) == 0 {
return fmt.Errorf("abi: unmarshalling empty output")
args, err := abi.getArguments(name, data)
if err != nil {
return err
}
// since there can't be naming collisions with contracts and events,
// we need to decide whether we're calling a method or an event
if method, ok := abi.Methods[name]; ok {
if len(data)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output")
}
return method.Outputs.UnpackIntoMap(v, data)
}
if event, ok := abi.Events[name]; ok {
return event.Inputs.UnpackIntoMap(v, data)
}
return fmt.Errorf("abi: could not locate named method or event")
return args.UnpackIntoMap(v, data)
}
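
(Editorial aside: the practical effect of this refactor on callers is that `Unpack` now returns a `[]interface{}` while the old pointer-based behaviour moves to `UnpackIntoInterface`. A minimal usage sketch against the new signatures; the one-method ABI JSON and the 32-byte return payload below are illustrative only.)

```go
package main

import (
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

const balanceOfABI = `[{"type":"function","name":"balanceOf","stateMutability":"view",
  "inputs":[{"name":"owner","type":"address"}],
  "outputs":[{"name":"balance","type":"uint256"}]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(balanceOfABI))
	if err != nil {
		log.Fatal(err)
	}

	// Pretend this is the raw return data of an eth_call: a single uint256 (42).
	ret := common.LeftPadBytes(big.NewInt(42).Bytes(), 32)

	// New style: decoded values come back as a slice.
	values, err := parsed.Unpack("balanceOf", ret)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("balance:", values[0].(*big.Int))

	// The old pointer-based style now lives under UnpackIntoInterface.
	var out struct{ Balance *big.Int }
	if err := parsed.UnpackIntoInterface(&out, "balanceOf", ret); err != nil {
		log.Fatal(err)
	}
	fmt.Println("balance:", out.Balance)
}
```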
// UnmarshalJSON implements json.Unmarshaler interface
// UnmarshalJSON implements json.Unmarshaler interface.
func (abi *ABI) UnmarshalJSON(data []byte) error {
var fields []struct {
Type string
Name string
Constant bool
Type string
Name string
Inputs []Argument
Outputs []Argument

// Status indicator which can be: "pure", "view",
// "nonpayable" or "payable".
StateMutability string

// Deprecated Status indicators, but removed in v0.6.0.
Constant bool // True if function is either pure or view
Payable bool // True if function is payable

// Event relevant indicator represents the event is
// declared as anonymous.
Anonymous bool
Inputs []Argument
Outputs []Argument
}
if err := json.Unmarshal(data, &fields); err != nil {
return err
@@ -129,51 +160,75 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
for _, field := range fields {
switch field.Type {
case "constructor":
abi.Constructor = Method{
Inputs: field.Inputs,
abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
case "function":
name := abi.overloadedMethodName(field.Name)
abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
case "fallback":
// New introduced function type in v0.6.0, check more detail
// here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function
if abi.HasFallback() {
return errors.New("only single fallback is allowed")
}
// empty defaults to function according to the abi spec
case "function", "":
name := field.Name
_, ok := abi.Methods[name]
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", field.Name, idx)
_, ok = abi.Methods[name]
abi.Fallback = NewMethod("", "", Fallback, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "receive":
// New introduced function type in v0.6.0, check more detail
// here https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function
if abi.HasReceive() {
return errors.New("only single receive is allowed")
}
abi.Methods[name] = Method{
Name: name,
RawName: field.Name,
Const: field.Constant,
Inputs: field.Inputs,
Outputs: field.Outputs,
if field.StateMutability != "payable" {
return errors.New("the statemutability of receive can only be payable")
}
abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "event":
name := field.Name
_, ok := abi.Events[name]
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", field.Name, idx)
_, ok = abi.Events[name]
}
abi.Events[name] = Event{
Name: name,
RawName: field.Name,
Anonymous: field.Anonymous,
Inputs: field.Inputs,
}
name := abi.overloadedEventName(field.Name)
abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
default:
return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
}
}

return nil
}

// MethodById looks up a method by the 4-byte id
// returns nil if none found
// overloadedMethodName returns the next available name for a given function.
// Needed since solidity allows for function overload.
//
// e.g. if the abi contains Methods send, send1
// overloadedMethodName would return send2 for input send.
func (abi *ABI) overloadedMethodName(rawName string) string {
name := rawName
_, ok := abi.Methods[name]
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
_, ok = abi.Methods[name]
}
return name
}

// overloadedEventName returns the next available name for a given event.
// Needed since solidity allows for event overload.
//
// e.g. if the abi contains events received, received1
// overloadedEventName would return received2 for input received.
func (abi *ABI) overloadedEventName(rawName string) string {
name := rawName
_, ok := abi.Events[name]
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
_, ok = abi.Events[name]
}
return name
}
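
(Editorial aside: with `overloadedMethodName`, a later declaration of an overloaded function is stored under a suffixed key; following the loop above, the second `send` below ends up under `send0`. The two-overload ABI JSON is a made-up illustration.)

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

const overloadedABI = `[
  {"type":"function","name":"send","inputs":[{"name":"a","type":"uint256"}],"outputs":[]},
  {"type":"function","name":"send","inputs":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256"}],"outputs":[]}
]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(overloadedABI))
	if err != nil {
		log.Fatal(err)
	}
	// The first "send" keeps its name; the second is renamed by overloadedMethodName.
	_, hasSend := parsed.Methods["send"]
	_, hasSend0 := parsed.Methods["send0"]
	fmt.Println(hasSend, hasSend0) // true true
}
```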
// MethodById looks up a method by the 4-byte id,
// returns nil if none found.
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
if len(sigdata) < 4 {
return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata))
}
for _, method := range abi.Methods {
if bytes.Equal(method.ID(), sigdata[:4]) {
if bytes.Equal(method.ID, sigdata[:4]) {
return &method, nil
}
}
@@ -184,9 +239,41 @@ func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
// ABI and returns nil if none found.
func (abi *ABI) EventByID(topic common.Hash) (*Event, error) {
for _, event := range abi.Events {
if bytes.Equal(event.ID().Bytes(), topic.Bytes()) {
if bytes.Equal(event.ID.Bytes(), topic.Bytes()) {
return &event, nil
}
}
return nil, fmt.Errorf("no event with id: %#x", topic.Hex())
}

// HasFallback returns an indicator whether a fallback function is included.
func (abi *ABI) HasFallback() bool {
return abi.Fallback.Type == Fallback
}

// HasReceive returns an indicator whether a receive function is included.
func (abi *ABI) HasReceive() bool {
return abi.Receive.Type == Receive
}

// revertSelector is a special function selector for revert reason unpacking.
var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]

// UnpackRevert resolves the abi-encoded revert reason. According to the solidity
// spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert,
// the provided revert reason is abi-encoded as if it were a call to a function
// `Error(string)`. So it's a special tool for it.
func UnpackRevert(data []byte) (string, error) {
if len(data) < 4 {
return "", errors.New("invalid data for unpacking")
}
if !bytes.Equal(data[:4], revertSelector) {
return "", errors.New("invalid data for unpacking")
}
typ, _ := NewType("string", "", nil)
unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:])
if err != nil {
return "", err
}
return unpacked[0].(string), nil
}
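
(Editorial aside: to exercise `UnpackRevert`, one can build a revert payload the same way the function expects it, i.e. an `Error(string)` call, and decode it back. The sketch below is not part of the vendored code; the revert reason string is arbitrary.)

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Encode a revert reason the way Solidity does for revert("..."):
	// the Error(string) selector followed by the ABI-encoded string.
	strType, err := abi.NewType("string", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	payload, err := abi.Arguments{{Type: strType}}.Pack("insufficient balance")
	if err != nil {
		log.Fatal(err)
	}
	data := append(crypto.Keccak256([]byte("Error(string)"))[:4], payload...)

	// Decode it back with the helper added in this file.
	reason, err := abi.UnpackRevert(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("revert reason:", reason) // insufficient balance
}
```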
Some files were not shown because too many files have changed in this diff.