From cf11713d9c29f7fdb8d51a7cd63def25ddc17a92 Mon Sep 17 00:00:00 2001 From: Richard Ramos Date: Tue, 12 Oct 2021 08:39:28 -0400 Subject: [PATCH] feat: wakuv2 peer persistence (#2287) --- go.mod | 2 + go.sum | 11 + node/status_node_services.go | 3 +- params/config.go | 4 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 +++++ .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 +++ .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 +++++++++++++++++ .../descriptor/descriptor_gostring.gen.go | 752 +++++ .../protoc-gen-gogo/descriptor/helper.go | 390 +++ .../github.com/ipfs/go-datastore/.gitignore | 1 + vendor/github.com/ipfs/go-datastore/LICENSE | 21 + vendor/github.com/ipfs/go-datastore/Makefile | 9 + vendor/github.com/ipfs/go-datastore/README.md | 47 + .../github.com/ipfs/go-datastore/basic_ds.go | 287 ++ vendor/github.com/ipfs/go-datastore/batch.go | 47 + .../github.com/ipfs/go-datastore/datastore.go | 251 ++ vendor/github.com/ipfs/go-datastore/go.mod | 12 + vendor/github.com/ipfs/go-datastore/go.sum | 16 + vendor/github.com/ipfs/go-datastore/key.go | 309 ++ .../ipfs/go-datastore/query/filter.go | 102 + .../ipfs/go-datastore/query/order.go | 94 + .../ipfs/go-datastore/query/query.go | 426 +++ .../ipfs/go-datastore/query/query_impl.go | 158 + vendor/github.com/ipfs/go-ds-sql/LICENSE | 21 + vendor/github.com/ipfs/go-ds-sql/README.md | 61 + vendor/github.com/ipfs/go-ds-sql/batching.go | 65 + vendor/github.com/ipfs/go-ds-sql/dstore.go | 203 ++ vendor/github.com/ipfs/go-ds-sql/go.mod | 8 + vendor/github.com/ipfs/go-ds-sql/go.sum | 17 + vendor/github.com/ipfs/go-ds-sql/package.json | 30 + vendor/github.com/ipfs/go-ds-sql/txn.go | 122 + .../libp2p/go-libp2p-core/test/addrs.go | 42 + .../libp2p/go-libp2p-core/test/crypto.go | 21 + .../libp2p/go-libp2p-core/test/errors.go | 19 + .../libp2p/go-libp2p-core/test/peer.go | 25 + .../libp2p/go-libp2p-peerstore/pb/Makefile | 12 + .../libp2p/go-libp2p-peerstore/pb/custom.go | 110 + .../go-libp2p-peerstore/pb/pstore.pb.go | 1036 ++++++ .../go-libp2p-peerstore/pb/pstore.proto | 40 + .../go-libp2p-peerstore/pstoreds/addr_book.go | 578 ++++ .../pstoreds/addr_book_gc.go | 388 +++ .../go-libp2p-peerstore/pstoreds/cache.go | 40 + .../pstoreds/cyclic_batch.go | 76 + .../go-libp2p-peerstore/pstoreds/keybook.go | 134 + .../go-libp2p-peerstore/pstoreds/metadata.go | 73 + .../go-libp2p-peerstore/pstoreds/peerstore.go | 166 + .../go-libp2p-peerstore/pstoreds/protobook.go | 188 ++ vendor/modules.txt | 10 + wakuv2/api_test.go | 2 +- wakuv2/config.go | 1 + wakuv2/persistence/queries.go | 93 + wakuv2/waku.go | 35 +- 58 files changed, 11256 insertions(+), 5 deletions(-) create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 vendor/github.com/ipfs/go-datastore/.gitignore create mode 100644 vendor/github.com/ipfs/go-datastore/LICENSE create mode 100644 vendor/github.com/ipfs/go-datastore/Makefile create mode 100644 vendor/github.com/ipfs/go-datastore/README.md create mode 100644 vendor/github.com/ipfs/go-datastore/basic_ds.go create mode 100644 vendor/github.com/ipfs/go-datastore/batch.go create mode 100644 vendor/github.com/ipfs/go-datastore/datastore.go create mode 100644 vendor/github.com/ipfs/go-datastore/go.mod create mode 100644 vendor/github.com/ipfs/go-datastore/go.sum create mode 100644 vendor/github.com/ipfs/go-datastore/key.go create mode 100644 vendor/github.com/ipfs/go-datastore/query/filter.go create mode 100644 vendor/github.com/ipfs/go-datastore/query/order.go create mode 100644 vendor/github.com/ipfs/go-datastore/query/query.go create mode 100644 vendor/github.com/ipfs/go-datastore/query/query_impl.go create mode 100644 vendor/github.com/ipfs/go-ds-sql/LICENSE create mode 100644 vendor/github.com/ipfs/go-ds-sql/README.md create mode 100644 vendor/github.com/ipfs/go-ds-sql/batching.go create mode 100644 vendor/github.com/ipfs/go-ds-sql/dstore.go create mode 100644 vendor/github.com/ipfs/go-ds-sql/go.mod create mode 100644 vendor/github.com/ipfs/go-ds-sql/go.sum create mode 100644 vendor/github.com/ipfs/go-ds-sql/package.json create mode 100644 vendor/github.com/ipfs/go-ds-sql/txn.go create mode 100644 vendor/github.com/libp2p/go-libp2p-core/test/addrs.go create mode 100644 vendor/github.com/libp2p/go-libp2p-core/test/crypto.go create mode 100644 vendor/github.com/libp2p/go-libp2p-core/test/errors.go create mode 100644 vendor/github.com/libp2p/go-libp2p-core/test/peer.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pb/Makefile create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pb/custom.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pb/pstore.pb.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pb/pstore.proto create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book_gc.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/cache.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/cyclic_batch.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/keybook.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/metadata.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/peerstore.go create mode 100644 vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/protobook.go create mode 100644 wakuv2/persistence/queries.go diff --git a/go.mod b/go.mod index 52be9b60d..731862abe 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/golang/mock v1.4.4 github.com/golang/protobuf v1.5.2 github.com/google/uuid v1.2.0 + github.com/ipfs/go-ds-sql v0.2.0 github.com/ipfs/go-log v1.0.4 github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a github.com/keighl/metabolize v0.0.0-20150915210303-97ab655d4034 @@ -29,6 +30,7 @@ require ( github.com/lib/pq v1.9.0 github.com/libp2p/go-libp2p v0.13.0 github.com/libp2p/go-libp2p-core 
v0.8.5 + github.com/libp2p/go-libp2p-peerstore v0.2.6 github.com/lucasb-eyer/go-colorful v1.0.3 github.com/mat/besticon v0.0.0-20210314201728-1579f269edb7 github.com/mattn/go-colorable v0.1.4 // indirect diff --git a/go.sum b/go.sum index da1659f78..0f802bc22 100644 --- a/go.sum +++ b/go.sum @@ -38,6 +38,7 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= @@ -65,6 +66,7 @@ github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETF github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/goquery v1.6.1 h1:FgjbQZKl5HTmcn4sKBgvx8vv63nhyhIpv7lJpFGCWpk= github.com/PuerkitoBio/goquery v1.6.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= @@ -213,11 +215,14 @@ github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zA github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.1 h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= @@ -234,6 +239,7 @@ github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk 
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dvyukov/go-fuzz v0.0.0-20191022152526-8cb203812681/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -464,16 +470,20 @@ github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqg github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= +github.com/ipfs/go-ds-badger v0.2.3 h1:J27YvAcpuA5IvZUbeBxOcQgqnYHUPxoygc6QxxkodZ4= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-sql v0.2.0 h1:ZUHUbU5IydNuBWzcRMOZYkBUwTg+L56o23fEVcbWC7o= github.com/ipfs/go-ds-sql v0.2.0/go.mod h1:/c47NpRiHobwn+8F8EpW0yBy8d3Mx/j/tIlrVN1e1Ec= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= @@ -1060,6 +1070,7 @@ github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7A github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= diff --git a/node/status_node_services.go b/node/status_node_services.go index 4d6aaeb1f..7540f9f4f 100644 --- 
a/node/status_node_services.go +++ b/node/status_node_services.go @@ -258,6 +258,7 @@ func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku, WakuRendezvousNodes: nodeConfig.ClusterConfig.WakuRendezvousNodes, PeerExchange: nodeConfig.WakuV2Config.PeerExchange, DiscoveryLimit: nodeConfig.WakuV2Config.DiscoveryLimit, + PersistPeers: nodeConfig.WakuV2Config.PersistPeers, } if cfg.Host == "" { @@ -278,7 +279,7 @@ func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku, } logging.SetAllLoggers(lvl) - w, err := wakuv2.New(nodeConfig.NodeKey, cfg, logutils.ZapLogger()) + w, err := wakuv2.New(nodeConfig.NodeKey, cfg, logutils.ZapLogger(), b.appDB) if err != nil { return nil, err diff --git a/params/config.go b/params/config.go index 1150c4e9f..dc340a491 100644 --- a/params/config.go +++ b/params/config.go @@ -175,6 +175,10 @@ type WakuV2Config struct { // DiscoveryLimit indicates the maximum number of peers to discover DiscoveryLimit int + // PersistPeers indicates whether peer records should be stored in the DB so that, the next time the node starts, // it attempts to reconnect to these peers + PersistPeers bool + // EnableMailServer is mode when node is capable of delivering expired messages on demand EnableMailServer bool diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 000000000..0b4659b73 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:.
*.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 000000000..081c86fa8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. 
+ - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +The is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package name for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. 
+ +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind is explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extension the code that is generated +will be the same as if goprotobuf has generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a seperate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 000000000..1e91766ae --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: 
"gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + 
proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 000000000..f6502e4b9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 000000000..b80c85653 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 000000000..390d4e4be --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && 
v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 000000000..3496dc99d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 000000000..a85bf1984 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. 
+package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 000000000..18b2a3318 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. 
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label 
{ + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m 
!= nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; in the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 000000000..165b2110d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 000000000..e0846a357 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/ipfs/go-datastore/.gitignore 
b/vendor/github.com/ipfs/go-datastore/.gitignore new file mode 100644 index 000000000..1377554eb --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/vendor/github.com/ipfs/go-datastore/LICENSE b/vendor/github.com/ipfs/go-datastore/LICENSE new file mode 100644 index 000000000..f20490238 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2016 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-datastore/Makefile b/vendor/github.com/ipfs/go-datastore/Makefile new file mode 100644 index 000000000..54152565e --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/Makefile @@ -0,0 +1,9 @@ +export IPFS_API ?= v04x.ipfs.io + +gx: + go get -u github.com/whyrusleeping/gx + go get -u github.com/whyrusleeping/gx-go + +deps: gx + gx --verbose install --global + gx-go rewrite diff --git a/vendor/github.com/ipfs/go-datastore/README.md b/vendor/github.com/ipfs/go-datastore/README.md new file mode 100644 index 000000000..58df92b6d --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/README.md @@ -0,0 +1,47 @@ +# go-datastore + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-datastore?status.svg)](https://godoc.org/github.com/ipfs/go-datastore) + +> key-value datastore interfaces + +## Lead Maintainer + +[Steven Allen](https://github.com/Stebalien) + +## Table of Contents + +- [Background](#background) +- [Documentation](#documentation) +- [Contribute](#contribute) +- [License](#license) + +## Background + +Datastore is a generic layer of abstraction for data store and database access. It is a simple API with the aim to enable application development in a datastore-agnostic way, allowing datastores to be swapped seamlessly without changing application code. Thus, one can leverage different datastores with different strengths without committing the application to one datastore throughout its lifetime. 
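A minimal sketch of this datastore-agnostic pattern, using the MapDatastore, LogDatastore, and Key types vendored later in this patch (hypothetical usage, not taken from the upstream README): application code talks only to the Datastore interface, so the backing store can be swapped or wrapped without any other changes.

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
)

func main() {
	// Start with the in-memory MapDatastore; any other implementation of
	// ds.Datastore could be substituted here without touching the rest of
	// the function.
	var store ds.Datastore = ds.NewMapDatastore()

	// Wrap it in a shim (LogDatastore) to trace accesses; the interface is
	// unchanged, so callers are unaffected.
	store = ds.NewLogDatastore(store, "example")

	key := ds.NewKey("/Comedy/MontyPython")
	if err := store.Put(key, []byte("sketches")); err != nil {
		panic(err)
	}

	val, err := store.Get(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s => %s\n", key, val)
}
```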
+ +In addition, grouped datastores significantly simplify interesting data access patterns (such as caching and sharding). + +Based on [datastore.py](https://github.com/datastore/datastore). + +## Documentation + +https://godoc.org/github.com/ipfs/go-datastore + +## Contribute + +Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/go-datastore/issues)! + +This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +### Want to hack on IPFS? + +[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md) + +## License + +MIT + diff --git a/vendor/github.com/ipfs/go-datastore/basic_ds.go b/vendor/github.com/ipfs/go-datastore/basic_ds.go new file mode 100644 index 000000000..4c851875d --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/basic_ds.go @@ -0,0 +1,287 @@ +package datastore + +import ( + "log" + + dsq "github.com/ipfs/go-datastore/query" +) + +// Here are some basic datastore implementations. + +// MapDatastore uses a standard Go map for internal storage. +type MapDatastore struct { + values map[Key][]byte +} + +// NewMapDatastore constructs a MapDatastore. It is _not_ thread-safe by +// default, wrap using sync.MutexWrap if you need thread safety (the answer here +// is usually yes). +func NewMapDatastore() (d *MapDatastore) { + return &MapDatastore{ + values: make(map[Key][]byte), + } +} + +// Put implements Datastore.Put +func (d *MapDatastore) Put(key Key, value []byte) (err error) { + d.values[key] = value + return nil +} + +// Sync implements Datastore.Sync +func (d *MapDatastore) Sync(prefix Key) error { + return nil +} + +// Get implements Datastore.Get +func (d *MapDatastore) Get(key Key) (value []byte, err error) { + val, found := d.values[key] + if !found { + return nil, ErrNotFound + } + return val, nil +} + +// Has implements Datastore.Has +func (d *MapDatastore) Has(key Key) (exists bool, err error) { + _, found := d.values[key] + return found, nil +} + +// GetSize implements Datastore.GetSize +func (d *MapDatastore) GetSize(key Key) (size int, err error) { + if v, found := d.values[key]; found { + return len(v), nil + } + return -1, ErrNotFound +} + +// Delete implements Datastore.Delete +func (d *MapDatastore) Delete(key Key) (err error) { + delete(d.values, key) + return nil +} + +// Query implements Datastore.Query +func (d *MapDatastore) Query(q dsq.Query) (dsq.Results, error) { + re := make([]dsq.Entry, 0, len(d.values)) + for k, v := range d.values { + e := dsq.Entry{Key: k.String(), Size: len(v)} + if !q.KeysOnly { + e.Value = v + } + re = append(re, e) + } + r := dsq.ResultsWithEntries(q, re) + r = dsq.NaiveQueryApply(q, r) + return r, nil +} + +func (d *MapDatastore) Batch() (Batch, error) { + return NewBasicBatch(d), nil +} + +func (d *MapDatastore) Close() error { + return nil +} + +// NullDatastore stores nothing, but conforms to the API. +// Useful to test with. 
+type NullDatastore struct { +} + +// NewNullDatastore constructs a null datastoe +func NewNullDatastore() *NullDatastore { + return &NullDatastore{} +} + +// Put implements Datastore.Put +func (d *NullDatastore) Put(key Key, value []byte) (err error) { + return nil +} + +// Sync implements Datastore.Sync +func (d *NullDatastore) Sync(prefix Key) error { + return nil +} + +// Get implements Datastore.Get +func (d *NullDatastore) Get(key Key) (value []byte, err error) { + return nil, ErrNotFound +} + +// Has implements Datastore.Has +func (d *NullDatastore) Has(key Key) (exists bool, err error) { + return false, nil +} + +// Has implements Datastore.GetSize +func (d *NullDatastore) GetSize(key Key) (size int, err error) { + return -1, ErrNotFound +} + +// Delete implements Datastore.Delete +func (d *NullDatastore) Delete(key Key) (err error) { + return nil +} + +// Query implements Datastore.Query +func (d *NullDatastore) Query(q dsq.Query) (dsq.Results, error) { + return dsq.ResultsWithEntries(q, nil), nil +} + +func (d *NullDatastore) Batch() (Batch, error) { + return NewBasicBatch(d), nil +} + +func (d *NullDatastore) Close() error { + return nil +} + +// LogDatastore logs all accesses through the datastore. +type LogDatastore struct { + Name string + child Datastore +} + +// Shim is a datastore which has a child. +type Shim interface { + Datastore + + Children() []Datastore +} + +// NewLogDatastore constructs a log datastore. +func NewLogDatastore(ds Datastore, name string) *LogDatastore { + if len(name) < 1 { + name = "LogDatastore" + } + return &LogDatastore{Name: name, child: ds} +} + +// Children implements Shim +func (d *LogDatastore) Children() []Datastore { + return []Datastore{d.child} +} + +// Put implements Datastore.Put +func (d *LogDatastore) Put(key Key, value []byte) (err error) { + log.Printf("%s: Put %s\n", d.Name, key) + // log.Printf("%s: Put %s ```%s```", d.Name, key, value) + return d.child.Put(key, value) +} + +// Sync implements Datastore.Sync +func (d *LogDatastore) Sync(prefix Key) error { + log.Printf("%s: Sync %s\n", d.Name, prefix) + return d.child.Sync(prefix) +} + +// Get implements Datastore.Get +func (d *LogDatastore) Get(key Key) (value []byte, err error) { + log.Printf("%s: Get %s\n", d.Name, key) + return d.child.Get(key) +} + +// Has implements Datastore.Has +func (d *LogDatastore) Has(key Key) (exists bool, err error) { + log.Printf("%s: Has %s\n", d.Name, key) + return d.child.Has(key) +} + +// GetSize implements Datastore.GetSize +func (d *LogDatastore) GetSize(key Key) (size int, err error) { + log.Printf("%s: GetSize %s\n", d.Name, key) + return d.child.GetSize(key) +} + +// Delete implements Datastore.Delete +func (d *LogDatastore) Delete(key Key) (err error) { + log.Printf("%s: Delete %s\n", d.Name, key) + return d.child.Delete(key) +} + +// DiskUsage implements the PersistentDatastore interface. +func (d *LogDatastore) DiskUsage() (uint64, error) { + log.Printf("%s: DiskUsage\n", d.Name) + return DiskUsage(d.child) +} + +// Query implements Datastore.Query +func (d *LogDatastore) Query(q dsq.Query) (dsq.Results, error) { + log.Printf("%s: Query\n", d.Name) + log.Printf("%s: q.Prefix: %s\n", d.Name, q.Prefix) + log.Printf("%s: q.KeysOnly: %v\n", d.Name, q.KeysOnly) + log.Printf("%s: q.Filters: %d\n", d.Name, len(q.Filters)) + log.Printf("%s: q.Orders: %d\n", d.Name, len(q.Orders)) + log.Printf("%s: q.Offset: %d\n", d.Name, q.Offset) + + return d.child.Query(q) +} + +// LogBatch logs all accesses through the batch. 
+type LogBatch struct { + Name string + child Batch +} + +func (d *LogDatastore) Batch() (Batch, error) { + log.Printf("%s: Batch\n", d.Name) + if bds, ok := d.child.(Batching); ok { + b, err := bds.Batch() + + if err != nil { + return nil, err + } + return &LogBatch{ + Name: d.Name, + child: b, + }, nil + } + return nil, ErrBatchUnsupported +} + +// Put implements Batch.Put +func (d *LogBatch) Put(key Key, value []byte) (err error) { + log.Printf("%s: BatchPut %s\n", d.Name, key) + // log.Printf("%s: Put %s ```%s```", d.Name, key, value) + return d.child.Put(key, value) +} + +// Delete implements Batch.Delete +func (d *LogBatch) Delete(key Key) (err error) { + log.Printf("%s: BatchDelete %s\n", d.Name, key) + return d.child.Delete(key) +} + +// Commit implements Batch.Commit +func (d *LogBatch) Commit() (err error) { + log.Printf("%s: BatchCommit\n", d.Name) + return d.child.Commit() +} + +func (d *LogDatastore) Close() error { + log.Printf("%s: Close\n", d.Name) + return d.child.Close() +} + +func (d *LogDatastore) Check() error { + if c, ok := d.child.(CheckedDatastore); ok { + return c.Check() + } + return nil +} + +func (d *LogDatastore) Scrub() error { + if c, ok := d.child.(ScrubbedDatastore); ok { + return c.Scrub() + } + return nil +} + +func (d *LogDatastore) CollectGarbage() error { + if c, ok := d.child.(GCDatastore); ok { + return c.CollectGarbage() + } + return nil +} diff --git a/vendor/github.com/ipfs/go-datastore/batch.go b/vendor/github.com/ipfs/go-datastore/batch.go new file mode 100644 index 000000000..41e23ffe4 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/batch.go @@ -0,0 +1,47 @@ +package datastore + +type op struct { + delete bool + value []byte +} + +// basicBatch implements the transaction interface for datastores who do +// not have any sort of underlying transactional support +type basicBatch struct { + ops map[Key]op + + target Datastore +} + +func NewBasicBatch(ds Datastore) Batch { + return &basicBatch{ + ops: make(map[Key]op), + target: ds, + } +} + +func (bt *basicBatch) Put(key Key, val []byte) error { + bt.ops[key] = op{value: val} + return nil +} + +func (bt *basicBatch) Delete(key Key) error { + bt.ops[key] = op{delete: true} + return nil +} + +func (bt *basicBatch) Commit() error { + var err error + for k, op := range bt.ops { + if op.delete { + err = bt.target.Delete(k) + } else { + err = bt.target.Put(k, op.value) + } + if err != nil { + break + } + } + + return err +} diff --git a/vendor/github.com/ipfs/go-datastore/datastore.go b/vendor/github.com/ipfs/go-datastore/datastore.go new file mode 100644 index 000000000..04ca726c4 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/datastore.go @@ -0,0 +1,251 @@ +package datastore + +import ( + "errors" + "io" + "time" + + query "github.com/ipfs/go-datastore/query" +) + +/* +Datastore represents storage for any key-value pair. + +Datastores are general enough to be backed by all kinds of different storage: +in-memory caches, databases, a remote datastore, flat files on disk, etc. + +The general idea is to wrap a more complicated storage facility in a simple, +uniform interface, keeping the freedom of using the right tools for the job. +In particular, a Datastore can aggregate other datastores in interesting ways, +like sharded (to distribute load) or tiered access (caches before databases). + +While Datastores should be written general enough to accept all sorts of +values, some implementations will undoubtedly have to be specific (e.g. 
SQL +databases where fields should be decomposed into columns), particularly to +support queries efficiently. Moreover, certain datastores may enforce certain +types of values (e.g. requiring an io.Reader, a specific struct, etc) or +serialization formats (JSON, Protobufs, etc). + +IMPORTANT: No Datastore should ever Panic! This is a cross-module interface, +and thus it should behave predictably and handle exceptional conditions with +proper error reporting. Thus, all Datastore calls may return errors, which +should be checked by callers. +*/ +type Datastore interface { + Read + Write + // Sync guarantees that any Put or Delete calls under prefix that returned + // before Sync(prefix) was called will be observed after Sync(prefix) + // returns, even if the program crashes. If Put/Delete operations already + // satisfy these requirements then Sync may be a no-op. + // + // If the prefix fails to Sync this method returns an error. + Sync(prefix Key) error + io.Closer +} + +// Write is the write-side of the Datastore interface. +type Write interface { + // Put stores the object `value` named by `key`. + // + // The generalized Datastore interface does not impose a value type, + // allowing various datastore middleware implementations (which do not + // handle the values directly) to be composed together. + // + // Ultimately, the lowest-level datastore will need to do some value checking + // or risk getting incorrect values. It may also be useful to expose a more + // type-safe interface to your application, and do the checking up-front. + Put(key Key, value []byte) error + + // Delete removes the value for given `key`. If the key is not in the + // datastore, this method returns no error. + Delete(key Key) error +} + +// Read is the read-side of the Datastore interface. +type Read interface { + // Get retrieves the object `value` named by `key`. + // Get will return ErrNotFound if the key is not mapped to a value. + Get(key Key) (value []byte, err error) + + // Has returns whether the `key` is mapped to a `value`. + // In some contexts, it may be much cheaper only to check for existence of + // a value, rather than retrieving the value itself. (e.g. HTTP HEAD). + // The default implementation is found in `GetBackedHas`. + Has(key Key) (exists bool, err error) + + // GetSize returns the size of the `value` named by `key`. + // In some contexts, it may be much cheaper to only get the size of the + // value rather than retrieving the value itself. + GetSize(key Key) (size int, err error) + + // Query searches the datastore and returns a query result. This function + // may return before the query actually runs. To wait for the query: + // + // result, _ := ds.Query(q) + // + // // use the channel interface; result may come in at different times + // for entry := range result.Next() { ... } + // + // // or wait for the query to be completely done + // entries, _ := result.Rest() + // for entry := range entries { ... } + // + Query(q query.Query) (query.Results, error) +} + +// Batching datastores support deferred, grouped updates to the database. +// `Batch`es do NOT have transactional semantics: updates to the underlying +// datastore are not guaranteed to occur in the same iota of time. Similarly, +// batched updates will not be flushed to the underlying datastore until +// `Commit` has been called. `Txn`s from a `TxnDatastore` have all the +// capabilities of a `Batch`, but the reverse is NOT true. 
+type Batching interface { + Datastore + + Batch() (Batch, error) +} + +// ErrBatchUnsupported is returned if the by Batch if the Datastore doesn't +// actually support batching. +var ErrBatchUnsupported = errors.New("this datastore does not support batching") + +// CheckedDatastore is an interface that should be implemented by datastores +// which may need checking on-disk data integrity. +type CheckedDatastore interface { + Datastore + + Check() error +} + +// ScrubbedDatastore is an interface that should be implemented by datastores +// which want to provide a mechanism to check data integrity and/or +// error correction. +type ScrubbedDatastore interface { + Datastore + + Scrub() error +} + +// GCDatastore is an interface that should be implemented by datastores which +// don't free disk space by just removing data from them. +type GCDatastore interface { + Datastore + + CollectGarbage() error +} + +// PersistentDatastore is an interface that should be implemented by datastores +// which can report disk usage. +type PersistentDatastore interface { + Datastore + + // DiskUsage returns the space used by a datastore, in bytes. + DiskUsage() (uint64, error) +} + +// DiskUsage checks if a Datastore is a +// PersistentDatastore and returns its DiskUsage(), +// otherwise returns 0. +func DiskUsage(d Datastore) (uint64, error) { + persDs, ok := d.(PersistentDatastore) + if !ok { + return 0, nil + } + return persDs.DiskUsage() +} + +// TTLDatastore is an interface that should be implemented by datastores that +// support expiring entries. +type TTLDatastore interface { + Datastore + TTL +} + +// TTL encapulates the methods that deal with entries with time-to-live. +type TTL interface { + PutWithTTL(key Key, value []byte, ttl time.Duration) error + SetTTL(key Key, ttl time.Duration) error + GetExpiration(key Key) (time.Time, error) +} + +// Txn extends the Datastore type. Txns allow users to batch queries and +// mutations to the Datastore into atomic groups, or transactions. Actions +// performed on a transaction will not take hold until a successful call to +// Commit has been made. Likewise, transactions can be aborted by calling +// Discard before a successful Commit has been made. +type Txn interface { + Read + Write + + // Commit finalizes a transaction, attempting to commit it to the Datastore. + // May return an error if the transaction has gone stale. The presence of an + // error is an indication that the data was not committed to the Datastore. + Commit() error + // Discard throws away changes recorded in a transaction without committing + // them to the underlying Datastore. Any calls made to Discard after Commit + // has been successfully called will have no effect on the transaction and + // state of the Datastore, making it safe to defer. + Discard() +} + +// TxnDatastore is an interface that should be implemented by datastores that +// support transactions. +type TxnDatastore interface { + Datastore + + NewTransaction(readOnly bool) (Txn, error) +} + +// Errors + +type dsError struct { + error + isNotFound bool +} + +func (e *dsError) NotFound() bool { + return e.isNotFound +} + +// ErrNotFound is returned by Get and GetSize when a datastore does not map the +// given key to a value. +var ErrNotFound error = &dsError{error: errors.New("datastore: key not found"), isNotFound: true} + +// GetBackedHas provides a default Datastore.Has implementation. 
+// It exists so Datastore.Has implementations can use it, like so: +// +// func (*d SomeDatastore) Has(key Key) (exists bool, err error) { +// return GetBackedHas(d, key) +// } +func GetBackedHas(ds Read, key Key) (bool, error) { + _, err := ds.Get(key) + switch err { + case nil: + return true, nil + case ErrNotFound: + return false, nil + default: + return false, err + } +} + +// GetBackedSize provides a default Datastore.GetSize implementation. +// It exists so Datastore.GetSize implementations can use it, like so: +// +// func (*d SomeDatastore) GetSize(key Key) (size int, err error) { +// return GetBackedSize(d, key) +// } +func GetBackedSize(ds Read, key Key) (int, error) { + value, err := ds.Get(key) + if err == nil { + return len(value), nil + } + return -1, err +} + +type Batch interface { + Write + + Commit() error +} diff --git a/vendor/github.com/ipfs/go-datastore/go.mod b/vendor/github.com/ipfs/go-datastore/go.mod new file mode 100644 index 000000000..709a9c126 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/go.mod @@ -0,0 +1,12 @@ +module github.com/ipfs/go-datastore + +require ( + github.com/google/uuid v1.1.1 + github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8 + github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 + github.com/kr/pretty v0.1.0 // indirect + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 +) + +go 1.12 diff --git a/vendor/github.com/ipfs/go-datastore/go.sum b/vendor/github.com/ipfs/go-datastore/go.sum new file mode 100644 index 000000000..d1289721f --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/go.sum @@ -0,0 +1,16 @@ +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8 h1:NAviDvJ0WXgD+yiL2Rj35AmnfgI11+pHXbdciD917U0= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/ipfs/go-datastore/key.go b/vendor/github.com/ipfs/go-datastore/key.go new file mode 100644 index 000000000..42cea3084 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/key.go @@ -0,0 +1,309 @@ +package datastore + +import ( + "encoding/json" + "path" + "strings" + + dsq 
"github.com/ipfs/go-datastore/query" + + "github.com/google/uuid" +) + +/* +A Key represents the unique identifier of an object. +Our Key scheme is inspired by file systems and Google App Engine key model. + +Keys are meant to be unique across a system. Keys are hierarchical, +incorporating more and more specific namespaces. Thus keys can be deemed +'children' or 'ancestors' of other keys:: + + Key("/Comedy") + Key("/Comedy/MontyPython") + +Also, every namespace can be parametrized to embed relevant object +information. For example, the Key `name` (most specific namespace) could +include the object type:: + + Key("/Comedy/MontyPython/Actor:JohnCleese") + Key("/Comedy/MontyPython/Sketch:CheeseShop") + Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender") + +*/ +type Key struct { + string +} + +// NewKey constructs a key from string. it will clean the value. +func NewKey(s string) Key { + k := Key{s} + k.Clean() + return k +} + +// RawKey creates a new Key without safety checking the input. Use with care. +func RawKey(s string) Key { + // accept an empty string and fix it to avoid special cases + // elsewhere + if len(s) == 0 { + return Key{"/"} + } + + // perform a quick sanity check that the key is in the correct + // format, if it is not then it is a programmer error and it is + // okay to panic + if len(s) == 0 || s[0] != '/' || (len(s) > 1 && s[len(s)-1] == '/') { + panic("invalid datastore key: " + s) + } + + return Key{s} +} + +// KeyWithNamespaces constructs a key out of a namespace slice. +func KeyWithNamespaces(ns []string) Key { + return NewKey(strings.Join(ns, "/")) +} + +// Clean up a Key, using path.Clean. +func (k *Key) Clean() { + switch { + case len(k.string) == 0: + k.string = "/" + case k.string[0] == '/': + k.string = path.Clean(k.string) + default: + k.string = path.Clean("/" + k.string) + } +} + +// Strings is the string value of Key +func (k Key) String() string { + return k.string +} + +// Bytes returns the string value of Key as a []byte +func (k Key) Bytes() []byte { + return []byte(k.string) +} + +// Equal checks equality of two keys +func (k Key) Equal(k2 Key) bool { + return k.string == k2.string +} + +// Less checks whether this key is sorted lower than another. +func (k Key) Less(k2 Key) bool { + list1 := k.List() + list2 := k2.List() + for i, c1 := range list1 { + if len(list2) < (i + 1) { + return false + } + + c2 := list2[i] + if c1 < c2 { + return true + } else if c1 > c2 { + return false + } + // c1 == c2, continue + } + + // list1 is shorter or exactly the same. + return len(list1) < len(list2) +} + +// List returns the `list` representation of this Key. +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() +// ["Comedy", "MontyPythong", "Actor:JohnCleese"] +func (k Key) List() []string { + return strings.Split(k.string, "/")[1:] +} + +// Reverse returns the reverse of this Key. +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse() +// NewKey("/Actor:JohnCleese/MontyPython/Comedy") +func (k Key) Reverse() Key { + l := k.List() + r := make([]string, len(l)) + for i, e := range l { + r[len(l)-i-1] = e + } + return KeyWithNamespaces(r) +} + +// Namespaces returns the `namespaces` making up this Key. 
+// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Namespaces() +// ["Comedy", "MontyPython", "Actor:JohnCleese"] +func (k Key) Namespaces() []string { + return k.List() +} + +// BaseNamespace returns the "base" namespace of this key (path.Base(filename)) +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace() +// "Actor:JohnCleese" +func (k Key) BaseNamespace() string { + n := k.Namespaces() + return n[len(n)-1] +} + +// Type returns the "type" of this key (value of last namespace). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Type() +// "Actor" +func (k Key) Type() string { + return NamespaceType(k.BaseNamespace()) +} + +// Name returns the "name" of this key (field of last namespace). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Name() +// "JohnCleese" +func (k Key) Name() string { + return NamespaceValue(k.BaseNamespace()) +} + +// Instance returns an "instance" of this type key (appends value to namespace). +// NewKey("/Comedy/MontyPython/Actor").Instance("JohnClesse") +// NewKey("/Comedy/MontyPython/Actor:JohnCleese") +func (k Key) Instance(s string) Key { + return NewKey(k.string + ":" + s) +} + +// Path returns the "path" of this key (parent + type). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path() +// NewKey("/Comedy/MontyPython/Actor") +func (k Key) Path() Key { + s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace()) + return NewKey(s) +} + +// Parent returns the `parent` Key of this Key. +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent() +// NewKey("/Comedy/MontyPython") +func (k Key) Parent() Key { + n := k.List() + if len(n) == 1 { + return RawKey("/") + } + return NewKey(strings.Join(n[:len(n)-1], "/")) +} + +// Child returns the `child` Key of this Key. +// NewKey("/Comedy/MontyPython").Child(NewKey("Actor:JohnCleese")) +// NewKey("/Comedy/MontyPython/Actor:JohnCleese") +func (k Key) Child(k2 Key) Key { + switch { + case k.string == "/": + return k2 + case k2.string == "/": + return k + default: + return RawKey(k.string + k2.string) + } +} + +// ChildString returns the `child` Key of this Key -- string helper. +// NewKey("/Comedy/MontyPython").ChildString("Actor:JohnCleese") +// NewKey("/Comedy/MontyPython/Actor:JohnCleese") +func (k Key) ChildString(s string) Key { + return NewKey(k.string + "/" + s) +} + +// IsAncestorOf returns whether this key is a prefix of `other` +// NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython") +// true +func (k Key) IsAncestorOf(other Key) bool { + // equivalent to HasPrefix(other, k.string + "/") + + if len(other.string) <= len(k.string) { + // We're not long enough to be a child. + return false + } + + if k.string == "/" { + // We're the root and the other key is longer. + return true + } + + // "other" starts with /k.string/ + return other.string[len(k.string)] == '/' && other.string[:len(k.string)] == k.string +} + +// IsDescendantOf returns whether this key contains another as a prefix. +// NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy") +// true +func (k Key) IsDescendantOf(other Key) bool { + return other.IsAncestorOf(k) +} + +// IsTopLevel returns whether this key has only one namespace. 
+func (k Key) IsTopLevel() bool { + return len(k.List()) == 1 +} + +// MarshalJSON implements the json.Marshaler interface, +// keys are represented as JSON strings +func (k Key) MarshalJSON() ([]byte, error) { + return json.Marshal(k.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface, +// keys will parse any value specified as a key to a string +func (k *Key) UnmarshalJSON(data []byte) error { + var key string + if err := json.Unmarshal(data, &key); err != nil { + return err + } + *k = NewKey(key) + return nil +} + +// RandomKey returns a randomly (uuid) generated key. +// RandomKey() +// NewKey("/f98719ea086343f7b71f32ea9d9d521d") +func RandomKey() Key { + return NewKey(strings.Replace(uuid.New().String(), "-", "", -1)) +} + +/* +A Key Namespace is like a path element. +A namespace can optionally include a type (delimited by ':') + + > NamespaceValue("Song:PhilosopherSong") + PhilosopherSong + > NamespaceType("Song:PhilosopherSong") + Song + > NamespaceType("Music:Song:PhilosopherSong") + Music:Song +*/ + +// NamespaceType is the first component of a namespace. `foo` in `foo:bar` +func NamespaceType(namespace string) string { + parts := strings.Split(namespace, ":") + if len(parts) < 2 { + return "" + } + return strings.Join(parts[0:len(parts)-1], ":") +} + +// NamespaceValue returns the last component of a namespace. `baz` in `f:b:baz` +func NamespaceValue(namespace string) string { + parts := strings.Split(namespace, ":") + return parts[len(parts)-1] +} + +// KeySlice attaches the methods of sort.Interface to []Key, +// sorting in increasing order. +type KeySlice []Key + +func (p KeySlice) Len() int { return len(p) } +func (p KeySlice) Less(i, j int) bool { return p[i].Less(p[j]) } +func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// EntryKeys +func EntryKeys(e []dsq.Entry) []Key { + ks := make([]Key, len(e)) + for i, e := range e { + ks[i] = NewKey(e.Key) + } + return ks +} diff --git a/vendor/github.com/ipfs/go-datastore/query/filter.go b/vendor/github.com/ipfs/go-datastore/query/filter.go new file mode 100644 index 000000000..1935c4888 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/query/filter.go @@ -0,0 +1,102 @@ +package query + +import ( + "bytes" + "fmt" + "strings" +) + +// Filter is an object that tests ResultEntries +type Filter interface { + // Filter returns whether an entry passes the filter + Filter(e Entry) bool +} + +// Op is a comparison operator +type Op string + +var ( + Equal = Op("==") + NotEqual = Op("!=") + GreaterThan = Op(">") + GreaterThanOrEqual = Op(">=") + LessThan = Op("<") + LessThanOrEqual = Op("<=") +) + +// FilterValueCompare is used to signal to datastores they +// should apply internal comparisons. unfortunately, there +// is no way to apply comparisons* to interface{} types in +// Go, so if the datastore doesnt have a special way to +// handle these comparisons, you must provided the +// TypedFilter to actually do filtering. +// +// [*] other than == and !=, which use reflect.DeepEqual. 
+type FilterValueCompare struct { + Op Op + Value []byte +} + +func (f FilterValueCompare) Filter(e Entry) bool { + cmp := bytes.Compare(e.Value, f.Value) + switch f.Op { + case Equal: + return cmp == 0 + case NotEqual: + return cmp != 0 + case LessThan: + return cmp < 0 + case LessThanOrEqual: + return cmp <= 0 + case GreaterThan: + return cmp > 0 + case GreaterThanOrEqual: + return cmp >= 0 + default: + panic(fmt.Errorf("unknown operation: %s", f.Op)) + } +} + +func (f FilterValueCompare) String() string { + return fmt.Sprintf("VALUE %s %q", f.Op, string(f.Value)) +} + +type FilterKeyCompare struct { + Op Op + Key string +} + +func (f FilterKeyCompare) Filter(e Entry) bool { + switch f.Op { + case Equal: + return e.Key == f.Key + case NotEqual: + return e.Key != f.Key + case GreaterThan: + return e.Key > f.Key + case GreaterThanOrEqual: + return e.Key >= f.Key + case LessThan: + return e.Key < f.Key + case LessThanOrEqual: + return e.Key <= f.Key + default: + panic(fmt.Errorf("unknown op '%s'", f.Op)) + } +} + +func (f FilterKeyCompare) String() string { + return fmt.Sprintf("KEY %s %q", f.Op, f.Key) +} + +type FilterKeyPrefix struct { + Prefix string +} + +func (f FilterKeyPrefix) Filter(e Entry) bool { + return strings.HasPrefix(e.Key, f.Prefix) +} + +func (f FilterKeyPrefix) String() string { + return fmt.Sprintf("PREFIX(%q)", f.Prefix) +} diff --git a/vendor/github.com/ipfs/go-datastore/query/order.go b/vendor/github.com/ipfs/go-datastore/query/order.go new file mode 100644 index 000000000..19931557e --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/query/order.go @@ -0,0 +1,94 @@ +package query + +import ( + "bytes" + "sort" + "strings" +) + +// Order is an object used to order objects +type Order interface { + Compare(a, b Entry) int +} + +// OrderByFunction orders the results based on the result of the given function. +type OrderByFunction func(a, b Entry) int + +func (o OrderByFunction) Compare(a, b Entry) int { + return o(a, b) +} + +func (OrderByFunction) String() string { + return "FN" +} + +// OrderByValue is used to signal to datastores they should apply internal +// orderings. +type OrderByValue struct{} + +func (o OrderByValue) Compare(a, b Entry) int { + return bytes.Compare(a.Value, b.Value) +} + +func (OrderByValue) String() string { + return "VALUE" +} + +// OrderByValueDescending is used to signal to datastores they +// should apply internal orderings. +type OrderByValueDescending struct{} + +func (o OrderByValueDescending) Compare(a, b Entry) int { + return -bytes.Compare(a.Value, b.Value) +} + +func (OrderByValueDescending) String() string { + return "desc(VALUE)" +} + +// OrderByKey +type OrderByKey struct{} + +func (o OrderByKey) Compare(a, b Entry) int { + return strings.Compare(a.Key, b.Key) +} + +func (OrderByKey) String() string { + return "KEY" +} + +// OrderByKeyDescending +type OrderByKeyDescending struct{} + +func (o OrderByKeyDescending) Compare(a, b Entry) int { + return -strings.Compare(a.Key, b.Key) +} + +func (OrderByKeyDescending) String() string { + return "desc(KEY)" +} + +// Less returns true if a comes before b with the requested orderings. +func Less(orders []Order, a, b Entry) bool { + for _, cmp := range orders { + switch cmp.Compare(a, b) { + case 0: + case -1: + return true + case 1: + return false + } + } + + // This gives us a *stable* sort for free. We don't care + // preserving the order from the underlying datastore + // because it's undefined. + return a.Key < b.Key +} + +// Sort sorts the given entries using the given orders. 
+func Sort(orders []Order, entries []Entry) { + sort.Slice(entries, func(i int, j int) bool { + return Less(orders, entries[i], entries[j]) + }) +} diff --git a/vendor/github.com/ipfs/go-datastore/query/query.go b/vendor/github.com/ipfs/go-datastore/query/query.go new file mode 100644 index 000000000..a390e5bf3 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/query/query.go @@ -0,0 +1,426 @@ +package query + +import ( + "fmt" + "time" + + goprocess "github.com/jbenet/goprocess" +) + +/* +Query represents storage for any key-value pair. + +tl;dr: + + queries are supported across datastores. + Cheap on top of relational dbs, and expensive otherwise. + Pick the right tool for the job! + +In addition to the key-value store get and set semantics, datastore +provides an interface to retrieve multiple records at a time through +the use of queries. The datastore Query model gleans a common set of +operations performed when querying. To avoid pasting here years of +database research, let’s summarize the operations datastore supports. + +Query Operations, applied in-order: + + * prefix - scope the query to a given path prefix + * filters - select a subset of values by applying constraints + * orders - sort the results by applying sort conditions, hierarchically. + * offset - skip a number of results (for efficient pagination) + * limit - impose a numeric limit on the number of results + +Datastore combines these operations into a simple Query class that allows +applications to define their constraints in a simple, generic, way without +introducing datastore specific calls, languages, etc. + +However, take heed: not all datastores support efficiently performing these +operations. Pick a datastore based on your needs. If you need efficient look-ups, +go for a simple key/value store. If you need efficient queries, consider an SQL +backed datastore. + +Notes: + + * Prefix: When a query filters by prefix, it selects keys that are strict + children of the prefix. For example, a prefix "/foo" would select "/foo/bar" + but not "/foobar" or "/foo", + * Orders: Orders are applied hierarchically. Results are sorted by the first + ordering, then entries equal under the first ordering are sorted with the + second ordering, etc. + * Limits & Offset: Limits and offsets are applied after everything else. +*/ +type Query struct { + Prefix string // namespaces the query to results whose keys have Prefix + Filters []Filter // filter results. apply sequentially + Orders []Order // order results. apply hierarchically + Limit int // maximum number of results + Offset int // skip given number of results + KeysOnly bool // return only keys. + ReturnExpirations bool // return expirations (see TTLDatastore) + ReturnsSizes bool // always return sizes. If not set, datastore impl can return + // // it anyway if it doesn't involve a performance cost. If KeysOnly + // // is not set, Size should always be set. +} + +// String returns a string representation of the Query for debugging/validation +// purposes. Do not use it for SQL queries. 
+func (q Query) String() string { + s := "SELECT keys" + if !q.KeysOnly { + s += ",vals" + } + if q.ReturnExpirations { + s += ",exps" + } + + s += " " + + if q.Prefix != "" { + s += fmt.Sprintf("FROM %q ", q.Prefix) + } + + if len(q.Filters) > 0 { + s += fmt.Sprintf("FILTER [%s", q.Filters[0]) + for _, f := range q.Filters[1:] { + s += fmt.Sprintf(", %s", f) + } + s += "] " + } + + if len(q.Orders) > 0 { + s += fmt.Sprintf("ORDER [%s", q.Orders[0]) + for _, f := range q.Orders[1:] { + s += fmt.Sprintf(", %s", f) + } + s += "] " + } + + if q.Offset > 0 { + s += fmt.Sprintf("OFFSET %d ", q.Offset) + } + + if q.Limit > 0 { + s += fmt.Sprintf("LIMIT %d ", q.Limit) + } + // Will always end with a space, strip it. + return s[:len(s)-1] +} + +// Entry is a query result entry. +type Entry struct { + Key string // cant be ds.Key because circular imports ...!!! + Value []byte // Will be nil if KeysOnly has been passed. + Expiration time.Time // Entry expiration timestamp if requested and supported (see TTLDatastore). + Size int // Might be -1 if the datastore doesn't support listing the size with KeysOnly + // // or if ReturnsSizes is not set +} + +// Result is a special entry that includes an error, so that the client +// may be warned about internal errors. If Error is non-nil, Entry must be +// empty. +type Result struct { + Entry + + Error error +} + +// Results is a set of Query results. This is the interface for clients. +// Example: +// +// qr, _ := myds.Query(q) +// for r := range qr.Next() { +// if r.Error != nil { +// // handle. +// break +// } +// +// fmt.Println(r.Entry.Key, r.Entry.Value) +// } +// +// or, wait on all results at once: +// +// qr, _ := myds.Query(q) +// es, _ := qr.Rest() +// for _, e := range es { +// fmt.Println(e.Key, e.Value) +// } +// +type Results interface { + Query() Query // the query these Results correspond to + Next() <-chan Result // returns a channel to wait for the next result + NextSync() (Result, bool) // blocks and waits to return the next result, second parameter returns false when results are exhausted + Rest() ([]Entry, error) // waits till processing finishes, returns all entries at once. + Close() error // client may call Close to signal early exit + + // Process returns a goprocess.Process associated with these results. + // most users will not need this function (Close is all they want), + // but it's here in case you want to connect the results to other + // goprocess-friendly things. + Process() goprocess.Process +} + +// results implements Results +type results struct { + query Query + proc goprocess.Process + res <-chan Result +} + +func (r *results) Next() <-chan Result { + return r.res +} + +func (r *results) NextSync() (Result, bool) { + val, ok := <-r.res + return val, ok +} + +func (r *results) Rest() ([]Entry, error) { + var es []Entry + for e := range r.res { + if e.Error != nil { + return es, e.Error + } + es = append(es, e.Entry) + } + <-r.proc.Closed() // wait till the processing finishes. + return es, nil +} + +func (r *results) Process() goprocess.Process { + return r.proc +} + +func (r *results) Close() error { + return r.proc.Close() +} + +func (r *results) Query() Query { + return r.query +} + +// ResultBuilder is what implementors use to construct results +// Implementors of datastores and their clients must respect the +// Process of the Request: +// +// * clients must call r.Process().Close() on an early exit, so +// implementations can reclaim resources. 
+// * if the Entries are read to completion (channel closed), Process +// should be closed automatically. +// * datastores must respect <-Process.Closing(), which intermediates +// an early close signal from the client. +// +type ResultBuilder struct { + Query Query + Process goprocess.Process + Output chan Result +} + +// Results returns a Results to to this builder. +func (rb *ResultBuilder) Results() Results { + return &results{ + query: rb.Query, + proc: rb.Process, + res: rb.Output, + } +} + +const NormalBufSize = 1 +const KeysOnlyBufSize = 128 + +func NewResultBuilder(q Query) *ResultBuilder { + bufSize := NormalBufSize + if q.KeysOnly { + bufSize = KeysOnlyBufSize + } + b := &ResultBuilder{ + Query: q, + Output: make(chan Result, bufSize), + } + b.Process = goprocess.WithTeardown(func() error { + close(b.Output) + return nil + }) + return b +} + +// ResultsWithChan returns a Results object from a channel +// of Result entries. +// +// DEPRECATED: This iterator is impossible to cancel correctly. Canceling it +// will leave anything trying to write to the result channel hanging. +func ResultsWithChan(q Query, res <-chan Result) Results { + return ResultsWithProcess(q, func(worker goprocess.Process, out chan<- Result) { + for { + select { + case <-worker.Closing(): // client told us to close early + return + case e, more := <-res: + if !more { + return + } + + select { + case out <- e: + case <-worker.Closing(): // client told us to close early + return + } + } + } + }) +} + +// ResultsWithProcess returns a Results object with the results generated by the +// passed subprocess. +func ResultsWithProcess(q Query, proc func(goprocess.Process, chan<- Result)) Results { + b := NewResultBuilder(q) + + // go consume all the entries and add them to the results. + b.Process.Go(func(worker goprocess.Process) { + proc(worker, b.Output) + }) + + go b.Process.CloseAfterChildren() //nolint + return b.Results() +} + +// ResultsWithEntries returns a Results object from a list of entries +func ResultsWithEntries(q Query, res []Entry) Results { + i := 0 + return ResultsFromIterator(q, Iterator{ + Next: func() (Result, bool) { + if i >= len(res) { + return Result{}, false + } + next := res[i] + i++ + return Result{Entry: next}, true + }, + }) +} + +func ResultsReplaceQuery(r Results, q Query) Results { + switch r := r.(type) { + case *results: + // note: not using field names to make sure all fields are copied + return &results{q, r.proc, r.res} + case *resultsIter: + // note: not using field names to make sure all fields are copied + lr := r.legacyResults + if lr != nil { + lr = &results{q, lr.proc, lr.res} + } + return &resultsIter{q, r.next, r.close, lr} + default: + panic("unknown results type") + } +} + +// +// ResultFromIterator provides an alternative way to to construct +// results without the use of channels. 
+// + +func ResultsFromIterator(q Query, iter Iterator) Results { + if iter.Close == nil { + iter.Close = noopClose + } + return &resultsIter{ + query: q, + next: iter.Next, + close: iter.Close, + } +} + +func noopClose() error { + return nil +} + +type Iterator struct { + Next func() (Result, bool) + Close func() error // note: might be called more than once +} + +type resultsIter struct { + query Query + next func() (Result, bool) + close func() error + legacyResults *results +} + +func (r *resultsIter) Next() <-chan Result { + r.useLegacyResults() + return r.legacyResults.Next() +} + +func (r *resultsIter) NextSync() (Result, bool) { + if r.legacyResults != nil { + return r.legacyResults.NextSync() + } else { + res, ok := r.next() + if !ok { + r.close() + } + return res, ok + } +} + +func (r *resultsIter) Rest() ([]Entry, error) { + var es []Entry + for { + e, ok := r.NextSync() + if !ok { + break + } + if e.Error != nil { + return es, e.Error + } + es = append(es, e.Entry) + } + return es, nil +} + +func (r *resultsIter) Process() goprocess.Process { + r.useLegacyResults() + return r.legacyResults.Process() +} + +func (r *resultsIter) Close() error { + if r.legacyResults != nil { + return r.legacyResults.Close() + } else { + return r.close() + } +} + +func (r *resultsIter) Query() Query { + return r.query +} + +func (r *resultsIter) useLegacyResults() { + if r.legacyResults != nil { + return + } + + b := NewResultBuilder(r.query) + + // go consume all the entries and add them to the results. + b.Process.Go(func(worker goprocess.Process) { + defer r.close() + for { + e, ok := r.next() + if !ok { + break + } + select { + case b.Output <- e: + case <-worker.Closing(): // client told us to close early + return + } + } + }) + + go b.Process.CloseAfterChildren() //nolint + + r.legacyResults = b.Results().(*results) +} diff --git a/vendor/github.com/ipfs/go-datastore/query/query_impl.go b/vendor/github.com/ipfs/go-datastore/query/query_impl.go new file mode 100644 index 000000000..dd554e743 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/query/query_impl.go @@ -0,0 +1,158 @@ +package query + +import ( + "path" + + goprocess "github.com/jbenet/goprocess" +) + +// NaiveFilter applies a filter to the results. 
+func NaiveFilter(qr Results, filter Filter) Results { + return ResultsFromIterator(qr.Query(), Iterator{ + Next: func() (Result, bool) { + for { + e, ok := qr.NextSync() + if !ok { + return Result{}, false + } + if e.Error != nil || filter.Filter(e.Entry) { + return e, true + } + } + }, + Close: func() error { + return qr.Close() + }, + }) +} + +// NaiveLimit truncates the results to a given int limit +func NaiveLimit(qr Results, limit int) Results { + if limit == 0 { + // 0 means no limit + return qr + } + closed := false + return ResultsFromIterator(qr.Query(), Iterator{ + Next: func() (Result, bool) { + if limit == 0 { + if !closed { + closed = true + err := qr.Close() + if err != nil { + return Result{Error: err}, true + } + } + return Result{}, false + } + limit-- + return qr.NextSync() + }, + Close: func() error { + if closed { + return nil + } + closed = true + return qr.Close() + }, + }) +} + +// NaiveOffset skips a given number of results +func NaiveOffset(qr Results, offset int) Results { + return ResultsFromIterator(qr.Query(), Iterator{ + Next: func() (Result, bool) { + for ; offset > 0; offset-- { + res, ok := qr.NextSync() + if !ok || res.Error != nil { + return res, ok + } + } + return qr.NextSync() + }, + Close: func() error { + return qr.Close() + }, + }) +} + +// NaiveOrder reorders results according to given orders. +// WARNING: this is the only non-stream friendly operation! +func NaiveOrder(qr Results, orders ...Order) Results { + // Short circuit. + if len(orders) == 0 { + return qr + } + + return ResultsWithProcess(qr.Query(), func(worker goprocess.Process, out chan<- Result) { + defer qr.Close() + var entries []Entry + collect: + for { + select { + case <-worker.Closing(): + return + case e, ok := <-qr.Next(): + if !ok { + break collect + } + if e.Error != nil { + out <- e + continue + } + entries = append(entries, e.Entry) + } + } + + Sort(orders, entries) + + for _, e := range entries { + select { + case <-worker.Closing(): + return + case out <- Result{Entry: e}: + } + } + }) +} + +func NaiveQueryApply(q Query, qr Results) Results { + if q.Prefix != "" { + // Clean the prefix as a key and append / so a prefix of /bar + // only finds /bar/baz, not /barbaz. + prefix := q.Prefix + if len(prefix) == 0 { + prefix = "/" + } else { + if prefix[0] != '/' { + prefix = "/" + prefix + } + prefix = path.Clean(prefix) + } + // If the prefix is empty, ignore it. + if prefix != "/" { + qr = NaiveFilter(qr, FilterKeyPrefix{prefix + "/"}) + } + } + for _, f := range q.Filters { + qr = NaiveFilter(qr, f) + } + if len(q.Orders) > 0 { + qr = NaiveOrder(qr, q.Orders...) 
+ } + if q.Offset != 0 { + qr = NaiveOffset(qr, q.Offset) + } + if q.Limit != 0 { + qr = NaiveLimit(qr, q.Limit) + } + return qr +} + +func ResultEntriesFrom(keys []string, vals [][]byte) []Entry { + re := make([]Entry, len(keys)) + for i, k := range keys { + re[i] = Entry{Key: k, Size: len(vals[i]), Value: vals[i]} + } + return re +} diff --git a/vendor/github.com/ipfs/go-ds-sql/LICENSE b/vendor/github.com/ipfs/go-ds-sql/LICENSE new file mode 100644 index 000000000..26100332b --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-sql/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Jeromy Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-ds-sql/README.md b/vendor/github.com/ipfs/go-ds-sql/README.md new file mode 100644 index 000000000..8a5a7068b --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-sql/README.md @@ -0,0 +1,61 @@ +# SQL Datastore + +[![CircleCI](https://circleci.com/gh/ipfs/go-ds-sql.svg?style=shield)](https://circleci.com/gh/ipfs/go-ds-sql) +[![Coverage](https://codecov.io/gh/ipfs/go-ds-sql/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-ds-sql) +[![Standard README](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg)](https://godoc.org/github.com/ipfs/go-ds-sql) +[![golang version](https://img.shields.io/badge/golang-%3E%3D1.14.0-orange.svg)](https://golang.org/) +[![Go Report Card](https://goreportcard.com/badge/github.com/ipfs/go-ds-sql)](https://goreportcard.com/report/github.com/ipfs/go-ds-sql) + +An implementation of [the datastore interface](https://github.com/ipfs/go-datastore) +that can be backed by any sql database. + +## Install + +```sh +go get github.com/ipfs/go-ds-sql +``` + +## Usage + +Ensure a database is created and a table exists with `key` and `data` columns. For example, in PostgreSQL you can create a table with the following structure (replacing `table_name` with the name of the table the datastore will use - by default this is `blocks`): + +```sql +CREATE TABLE IF NOT EXISTS table_name (key TEXT NOT NULL UNIQUE, data BYTEA) +``` + +It's recommended to create an index on the `key` column that is optimised for prefix scans. 
For example, in PostgreSQL you can create a `text_pattern_ops` index on the table: + +```sql +CREATE INDEX IF NOT EXISTS table_name_key_text_pattern_ops_idx ON table_name (key text_pattern_ops) +``` + +Import and use in your application: + +```go +import ( + "database/sql" + "github.com/ipfs/go-ds-sql" + pg "github.com/ipfs/go-ds-sql/postgres" +) + +mydb, _ := sql.Open("yourdb", "yourdbparameters") + +// Implement the Queries interface for your SQL impl. +// ...or use the provided PostgreSQL queries +queries := pg.NewQueries("blocks") + +ds := sqlds.NewDatastore(mydb, queries) +``` + +## API + +[GoDoc Reference](https://godoc.org/github.com/ipfs/go-ds-sql) + +## Contribute + +Feel free to dive in! [Open an issue](https://github.com/ipfs/go-ds-sql/issues/new) or submit PRs. + +## License + +[MIT](LICENSE) diff --git a/vendor/github.com/ipfs/go-ds-sql/batching.go b/vendor/github.com/ipfs/go-ds-sql/batching.go new file mode 100644 index 000000000..34126a3d3 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-sql/batching.go @@ -0,0 +1,65 @@ +package sqlds + +import ( + "context" + + ds "github.com/ipfs/go-datastore" +) + +type op struct { + delete bool + value []byte +} + +type batch struct { + ds *Datastore + ops map[ds.Key]op +} + +// Batch creates a set of deferred updates to the database. +// Since SQL does not support a true batch of updates, +// operations are buffered and then executed sequentially +// over a single connection when Commit is called. +func (d *Datastore) Batch() (ds.Batch, error) { + return &batch{ + ds: d, + ops: make(map[ds.Key]op), + }, nil +} + +func (bt *batch) Put(key ds.Key, val []byte) error { + bt.ops[key] = op{value: val} + return nil +} + +func (bt *batch) Delete(key ds.Key) error { + bt.ops[key] = op{delete: true} + return nil +} + +func (bt *batch) Commit() error { + return bt.CommitContext(context.Background()) +} + +func (bt *batch) CommitContext(ctx context.Context) error { + conn, err := bt.ds.db.Conn(ctx) + if err != nil { + return err + } + defer conn.Close() + + for k, op := range bt.ops { + if op.delete { + _, err = conn.ExecContext(ctx, bt.ds.queries.Delete(), k.String()) + } else { + _, err = conn.ExecContext(ctx, bt.ds.queries.Put(), k.String(), op.value) + } + if err != nil { + break + } + } + + return err +} + +var _ ds.Batching = (*Datastore)(nil) diff --git a/vendor/github.com/ipfs/go-ds-sql/dstore.go b/vendor/github.com/ipfs/go-ds-sql/dstore.go new file mode 100644 index 000000000..d0b431bc7 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-sql/dstore.go @@ -0,0 +1,203 @@ +package sqlds + +import ( + "database/sql" + "fmt" + + ds "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" +) + +// Queries generates SQL queries for datastore operations. +type Queries interface { + Delete() string + Exists() string + Get() string + Put() string + Query() string + Prefix() string + Limit() string + Offset() string + GetSize() string +} + +// Datastore is a SQL backed datastore. +type Datastore struct { + db *sql.DB + queries Queries +} + +// NewDatastore returns a new SQL datastore. +func NewDatastore(db *sql.DB, queries Queries) *Datastore { + return &Datastore{db: db, queries: queries} +} + +// Close closes the underying SQL database. +func (d *Datastore) Close() error { + return d.db.Close() +} + +// Delete removes a row from the SQL database by the given key. 
+func (d *Datastore) Delete(key ds.Key) error { + _, err := d.db.Exec(d.queries.Delete(), key.String()) + if err != nil { + return err + } + + return nil +} + +// Get retrieves a value from the SQL database by the given key. +func (d *Datastore) Get(key ds.Key) (value []byte, err error) { + row := d.db.QueryRow(d.queries.Get(), key.String()) + var out []byte + + switch err := row.Scan(&out); err { + case sql.ErrNoRows: + return nil, ds.ErrNotFound + case nil: + return out, nil + default: + return nil, err + } +} + +// Has determines if a value for the given key exists in the SQL database. +func (d *Datastore) Has(key ds.Key) (exists bool, err error) { + row := d.db.QueryRow(d.queries.Exists(), key.String()) + + switch err := row.Scan(&exists); err { + case sql.ErrNoRows: + return exists, nil + case nil: + return exists, nil + default: + return exists, err + } +} + +// Put "upserts" a row into the SQL database. +func (d *Datastore) Put(key ds.Key, value []byte) error { + _, err := d.db.Exec(d.queries.Put(), key.String(), value) + if err != nil { + return err + } + + return nil +} + +// Query returns multiple rows from the SQL database based on the passed query parameters. +func (d *Datastore) Query(q dsq.Query) (dsq.Results, error) { + raw, err := d.rawQuery(q) + if err != nil { + return nil, err + } + + for _, f := range q.Filters { + raw = dsq.NaiveFilter(raw, f) + } + + raw = dsq.NaiveOrder(raw, q.Orders...) + + // if we have filters or orders, offset and limit won't have been applied in the query + if len(q.Filters) > 0 || len(q.Orders) > 0 { + if q.Offset != 0 { + raw = dsq.NaiveOffset(raw, q.Offset) + } + if q.Limit != 0 { + raw = dsq.NaiveLimit(raw, q.Limit) + } + } + + return raw, nil +} + +func (d *Datastore) rawQuery(q dsq.Query) (dsq.Results, error) { + var rows *sql.Rows + var err error + + rows, err = queryWithParams(d, q) + if err != nil { + return nil, err + } + + it := dsq.Iterator{ + Next: func() (dsq.Result, bool) { + if !rows.Next() { + return dsq.Result{}, false + } + + var key string + var out []byte + + err := rows.Scan(&key, &out) + if err != nil { + return dsq.Result{Error: err}, false + } + + entry := dsq.Entry{Key: key} + + if !q.KeysOnly { + entry.Value = out + } + if q.ReturnsSizes { + entry.Size = len(out) + } + + return dsq.Result{Entry: entry}, true + }, + Close: func() error { + return rows.Close() + }, + } + + return dsq.ResultsFromIterator(q, it), nil +} + +// Sync is noop for SQL databases. +func (d *Datastore) Sync(key ds.Key) error { + return nil +} + +// GetSize determines the size in bytes of the value for a given key. 
+func (d *Datastore) GetSize(key ds.Key) (int, error) { + row := d.db.QueryRow(d.queries.GetSize(), key.String()) + var size int + + switch err := row.Scan(&size); err { + case sql.ErrNoRows: + return -1, ds.ErrNotFound + case nil: + return size, nil + default: + return 0, err + } +} + +// queryWithParams applies prefix, limit, and offset params in pg query +func queryWithParams(d *Datastore, q dsq.Query) (*sql.Rows, error) { + var qNew = d.queries.Query() + + if q.Prefix != "" { + // normalize + prefix := ds.NewKey(q.Prefix).String() + if prefix != "/" { + qNew += fmt.Sprintf(d.queries.Prefix(), prefix+"/") + } + } + + // only apply limit and offset if we do not have to naive filter/order the results + if len(q.Filters) == 0 && len(q.Orders) == 0 { + if q.Limit != 0 { + qNew += fmt.Sprintf(d.queries.Limit(), q.Limit) + } + if q.Offset != 0 { + qNew += fmt.Sprintf(d.queries.Offset(), q.Offset) + } + } + + return d.db.Query(qNew) + +} + +var _ ds.Datastore = (*Datastore)(nil) diff --git a/vendor/github.com/ipfs/go-ds-sql/go.mod b/vendor/github.com/ipfs/go-ds-sql/go.mod new file mode 100644 index 000000000..7d470271a --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-sql/go.mod @@ -0,0 +1,8 @@ +module github.com/ipfs/go-ds-sql + +go 1.12 + +require ( + github.com/ipfs/go-datastore v0.4.4 + github.com/lib/pq v1.3.0 +) diff --git a/vendor/github.com/ipfs/go-ds-sql/go.sum b/vendor/github.com/ipfs/go-ds-sql/go.sum new file mode 100644 index 000000000..aa6c48c39 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-sql/go.sum @@ -0,0 +1,17 @@ +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/ipfs/go-ds-sql/package.json b/vendor/github.com/ipfs/go-ds-sql/package.json new file mode 100644 index 000000000..f5cccc1b6 --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-sql/package.json @@ -0,0 +1,30 @@ +{ + "author": "whyrusleeping", + "bugs": { + "url": "https://github.com/whyrusleeping/sql-datastore" + }, + "gx": { + "dvcsimport": "github.com/whyrusleeping/sql-datastore" + }, + 
"gxDependencies": [ + { + "author": "magik6k", + "hash": "QmfJhaxwzBCorUmZNRmY87z4mD6roRrHFMqddhiS4D4XWr", + "name": "pq", + "version": "1.0.0" + }, + { + "author": "jbenet", + "hash": "QmPGYyi1DtuWyUkG3PtvLz1xb4ScjnUvwJMCoX3cxeyxNr", + "name": "go-datastore", + "version": "3.5.0" + } + ], + "gxVersion": "0.14.0", + "language": "go", + "license": "", + "name": "sql-datastore", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "1.0.2" +} + diff --git a/vendor/github.com/ipfs/go-ds-sql/txn.go b/vendor/github.com/ipfs/go-ds-sql/txn.go new file mode 100644 index 000000000..54ccb187f --- /dev/null +++ b/vendor/github.com/ipfs/go-ds-sql/txn.go @@ -0,0 +1,122 @@ +package sqlds + +import ( + "database/sql" + "fmt" + + datastore "github.com/ipfs/go-datastore" + ds "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" +) + +// ErrNotImplemented is returned when the SQL datastore does not yet implement the function call. +var ErrNotImplemented = fmt.Errorf("not implemented") + +type txn struct { + db *sql.DB + queries Queries + txn *sql.Tx +} + +// NewTransaction creates a new database transaction, note the readOnly parameter is ignored by this implementation. +func (ds *Datastore) NewTransaction(_ bool) (datastore.Txn, error) { + sqlTxn, err := ds.db.Begin() + if err != nil { + if sqlTxn != nil { + // nothing we can do about this error. + _ = sqlTxn.Rollback() + } + + return nil, err + } + + return &txn{ + db: ds.db, + queries: ds.queries, + txn: sqlTxn, + }, nil +} + +func (t *txn) Get(key ds.Key) ([]byte, error) { + row := t.txn.QueryRow(t.queries.Get(), key.String()) + var out []byte + + switch err := row.Scan(&out); err { + case sql.ErrNoRows: + return nil, ds.ErrNotFound + case nil: + return out, nil + default: + return nil, err + } +} + +func (t *txn) Has(key ds.Key) (bool, error) { + row := t.txn.QueryRow(t.queries.Exists(), key.String()) + var exists bool + + switch err := row.Scan(&exists); err { + case sql.ErrNoRows: + return exists, nil + case nil: + return exists, nil + default: + return exists, err + } +} + +func (t *txn) GetSize(key ds.Key) (int, error) { + row := t.txn.QueryRow(t.queries.GetSize(), key.String()) + var size int + + switch err := row.Scan(&size); err { + case sql.ErrNoRows: + return -1, ds.ErrNotFound + case nil: + return size, nil + default: + return 0, err + } +} + +func (t *txn) Query(q dsq.Query) (dsq.Results, error) { + return nil, ErrNotImplemented +} + +// Put adds a value to the datastore identified by the given key. +func (t *txn) Put(key ds.Key, val []byte) error { + _, err := t.txn.Exec(t.queries.Put(), key.String(), val) + if err != nil { + _ = t.txn.Rollback() + return err + } + return nil +} + +// Delete removes a value from the datastore that matches the given key. +func (t *txn) Delete(key ds.Key) error { + _, err := t.txn.Exec(t.queries.Delete(), key.String()) + if err != nil { + _ = t.txn.Rollback() + return err + } + return nil +} + +// Commit finalizes a transaction. +func (t *txn) Commit() error { + err := t.txn.Commit() + if err != nil { + _ = t.txn.Rollback() + return err + } + return nil +} + +// Discard throws away changes recorded in a transaction without committing +// them to the underlying Datastore. 
+func (t *txn) Discard() { + _ = t.txn.Rollback() +} + +var _ ds.TxnDatastore = (*Datastore)(nil) diff --git a/vendor/github.com/libp2p/go-libp2p-core/test/addrs.go b/vendor/github.com/libp2p/go-libp2p-core/test/addrs.go new file mode 100644 index 000000000..e18849c48 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/test/addrs.go @@ -0,0 +1,42 @@ +package test + +import ( + "fmt" + "testing" + + ma "github.com/multiformats/go-multiaddr" +) + +func GenerateTestAddrs(n int) []ma.Multiaddr { + out := make([]ma.Multiaddr, n) + for i := 0; i < n; i++ { + a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i)) + if err != nil { + continue + } + out[i] = a + } + return out +} + +func AssertAddressesEqual(t *testing.T, exp, act []ma.Multiaddr) { + t.Helper() + if len(exp) != len(act) { + t.Fatalf("lengths not the same. expected %d, got %d\n", len(exp), len(act)) + } + + for _, a := range exp { + found := false + + for _, b := range act { + if a.Equal(b) { + found = true + break + } + } + + if !found { + t.Fatalf("expected address %s not found", a) + } + } +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/test/crypto.go b/vendor/github.com/libp2p/go-libp2p-core/test/crypto.go new file mode 100644 index 000000000..8733cf924 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/test/crypto.go @@ -0,0 +1,21 @@ +package test + +import ( + "math/rand" + "sync/atomic" + + ci "github.com/libp2p/go-libp2p-core/crypto" +) + +var globalSeed int64 + +func RandTestKeyPair(typ, bits int) (ci.PrivKey, ci.PubKey, error) { + // workaround for low time resolution + seed := atomic.AddInt64(&globalSeed, 1) + return SeededTestKeyPair(typ, bits, seed) +} + +func SeededTestKeyPair(typ, bits int, seed int64) (ci.PrivKey, ci.PubKey, error) { + r := rand.New(rand.NewSource(seed)) + return ci.GenerateKeyPairWithReader(typ, bits, r) +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/test/errors.go b/vendor/github.com/libp2p/go-libp2p-core/test/errors.go new file mode 100644 index 000000000..82a3e696f --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/test/errors.go @@ -0,0 +1,19 @@ +package test + +import ( + "testing" +) + +func AssertNilError(t *testing.T, err error) { + t.Helper() + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func ExpectError(t *testing.T, err error, msg string) { + t.Helper() + if err == nil { + t.Error(msg) + } +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/test/peer.go b/vendor/github.com/libp2p/go-libp2p-core/test/peer.go new file mode 100644 index 000000000..b95639fe2 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/test/peer.go @@ -0,0 +1,25 @@ +package test + +import ( + "math/rand" + "testing" + + "github.com/libp2p/go-libp2p-core/peer" + + mh "github.com/multiformats/go-multihash" +) + +func RandPeerID() (peer.ID, error) { + buf := make([]byte, 16) + rand.Read(buf) + h, _ := mh.Sum(buf, mh.SHA2_256, -1) + return peer.ID(h), nil +} + +func RandPeerIDFatal(t testing.TB) peer.ID { + p, err := RandPeerID() + if err != nil { + t.Fatal(err) + } + return p +} diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pb/Makefile b/vendor/github.com/libp2p/go-libp2p-peerstore/pb/Makefile new file mode 100644 index 000000000..51e71f89f --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pb/Makefile @@ -0,0 +1,12 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(GOPATH)/src:. --gogofaster_out=. 
$< + +clean: + rm -f *.pb.go + +.PHONY: clean \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pb/custom.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pb/custom.go new file mode 100644 index 000000000..41784a4f6 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pb/custom.go @@ -0,0 +1,110 @@ +package pstore_pb + +import ( + "encoding/json" + + proto "github.com/gogo/protobuf/proto" + peer "github.com/libp2p/go-libp2p-core/peer" + pt "github.com/libp2p/go-libp2p-core/test" + ma "github.com/multiformats/go-multiaddr" +) + +// customGogoType aggregates the interfaces that custom Gogo types need to implement. +// it is only used for type assertions. +type customGogoType interface { + proto.Marshaler + proto.Unmarshaler + json.Marshaler + json.Unmarshaler + proto.Sizer +} + +// ProtoAddr is a custom type used by gogo to serde raw peer IDs into the peer.ID type, and back. +type ProtoPeerID struct { + peer.ID +} + +var _ customGogoType = (*ProtoPeerID)(nil) + +func (id ProtoPeerID) Marshal() ([]byte, error) { + return []byte(id.ID), nil +} + +func (id ProtoPeerID) MarshalTo(data []byte) (n int, err error) { + return copy(data, []byte(id.ID)), nil +} + +func (id ProtoPeerID) MarshalJSON() ([]byte, error) { + m, _ := id.Marshal() + return json.Marshal(m) +} + +func (id *ProtoPeerID) Unmarshal(data []byte) (err error) { + id.ID = peer.ID(string(data)) + return nil +} + +func (id *ProtoPeerID) UnmarshalJSON(data []byte) error { + var v []byte + err := json.Unmarshal(data, &v) + if err != nil { + return err + } + return id.Unmarshal(v) +} + +func (id ProtoPeerID) Size() int { + return len([]byte(id.ID)) +} + +// ProtoAddr is a custom type used by gogo to serde raw multiaddresses into the ma.Multiaddr type, and back. +type ProtoAddr struct { + ma.Multiaddr +} + +var _ customGogoType = (*ProtoAddr)(nil) + +func (a ProtoAddr) Marshal() ([]byte, error) { + return a.Bytes(), nil +} + +func (a ProtoAddr) MarshalTo(data []byte) (n int, err error) { + return copy(data, a.Bytes()), nil +} + +func (a ProtoAddr) MarshalJSON() ([]byte, error) { + m, _ := a.Marshal() + return json.Marshal(m) +} + +func (a *ProtoAddr) Unmarshal(data []byte) (err error) { + a.Multiaddr, err = ma.NewMultiaddrBytes(data) + return err +} + +func (a *ProtoAddr) UnmarshalJSON(data []byte) error { + v := new([]byte) + err := json.Unmarshal(data, v) + if err != nil { + return err + } + return a.Unmarshal(*v) +} + +func (a ProtoAddr) Size() int { + return len(a.Bytes()) +} + +// NewPopulatedProtoAddr generates a populated instance of the custom gogo type ProtoAddr. +// It is required by gogo-generated tests. +func NewPopulatedProtoAddr(r randyPstore) *ProtoAddr { + a, _ := ma.NewMultiaddr("/ip4/123.123.123.123/tcp/7001") + return &ProtoAddr{Multiaddr: a} +} + +// NewPopulatedProtoPeerID generates a populated instance of the custom gogo type ProtoPeerID. +// It is required by gogo-generated tests. +func NewPopulatedProtoPeerID(r randyPstore) *ProtoPeerID { + id, _ := pt.RandPeerID() + return &ProtoPeerID{ID: id} +} diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pb/pstore.pb.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pb/pstore.pb.go new file mode 100644 index 000000000..c7d99807a --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pb/pstore.pb.go @@ -0,0 +1,1036 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: pstore.proto + +package pstore_pb + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// AddrBookRecord represents a record for a peer in the address book. +type AddrBookRecord struct { + // The peer ID. + Id *ProtoPeerID `protobuf:"bytes,1,opt,name=id,proto3,customtype=ProtoPeerID" json:"id,omitempty"` + // The multiaddresses. This is a sorted list where element 0 expires the soonest. + Addrs []*AddrBookRecord_AddrEntry `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` + // The most recently received signed PeerRecord. + CertifiedRecord *AddrBookRecord_CertifiedRecord `protobuf:"bytes,3,opt,name=certified_record,json=certifiedRecord,proto3" json:"certified_record,omitempty"` +} + +func (m *AddrBookRecord) Reset() { *m = AddrBookRecord{} } +func (m *AddrBookRecord) String() string { return proto.CompactTextString(m) } +func (*AddrBookRecord) ProtoMessage() {} +func (*AddrBookRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_f96873690e08a98f, []int{0} +} +func (m *AddrBookRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AddrBookRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AddrBookRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AddrBookRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddrBookRecord.Merge(m, src) +} +func (m *AddrBookRecord) XXX_Size() int { + return m.Size() +} +func (m *AddrBookRecord) XXX_DiscardUnknown() { + xxx_messageInfo_AddrBookRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_AddrBookRecord proto.InternalMessageInfo + +func (m *AddrBookRecord) GetAddrs() []*AddrBookRecord_AddrEntry { + if m != nil { + return m.Addrs + } + return nil +} + +func (m *AddrBookRecord) GetCertifiedRecord() *AddrBookRecord_CertifiedRecord { + if m != nil { + return m.CertifiedRecord + } + return nil +} + +// AddrEntry represents a single multiaddress. +type AddrBookRecord_AddrEntry struct { + Addr *ProtoAddr `protobuf:"bytes,1,opt,name=addr,proto3,customtype=ProtoAddr" json:"addr,omitempty"` + // The point in time when this address expires. + Expiry int64 `protobuf:"varint,2,opt,name=expiry,proto3" json:"expiry,omitempty"` + // The original TTL of this address. 
+ Ttl int64 `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (m *AddrBookRecord_AddrEntry) Reset() { *m = AddrBookRecord_AddrEntry{} } +func (m *AddrBookRecord_AddrEntry) String() string { return proto.CompactTextString(m) } +func (*AddrBookRecord_AddrEntry) ProtoMessage() {} +func (*AddrBookRecord_AddrEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_f96873690e08a98f, []int{0, 0} +} +func (m *AddrBookRecord_AddrEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AddrBookRecord_AddrEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AddrBookRecord_AddrEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AddrBookRecord_AddrEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddrBookRecord_AddrEntry.Merge(m, src) +} +func (m *AddrBookRecord_AddrEntry) XXX_Size() int { + return m.Size() +} +func (m *AddrBookRecord_AddrEntry) XXX_DiscardUnknown() { + xxx_messageInfo_AddrBookRecord_AddrEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_AddrBookRecord_AddrEntry proto.InternalMessageInfo + +func (m *AddrBookRecord_AddrEntry) GetExpiry() int64 { + if m != nil { + return m.Expiry + } + return 0 +} + +func (m *AddrBookRecord_AddrEntry) GetTtl() int64 { + if m != nil { + return m.Ttl + } + return 0 +} + +// CertifiedRecord contains a serialized signed PeerRecord used to +// populate the signedAddrs list. +type AddrBookRecord_CertifiedRecord struct { + // The Seq counter from the signed PeerRecord envelope + Seq uint64 `protobuf:"varint,1,opt,name=seq,proto3" json:"seq,omitempty"` + // The serialized bytes of the SignedEnvelope containing the PeerRecord. 
+ Raw []byte `protobuf:"bytes,2,opt,name=raw,proto3" json:"raw,omitempty"` +} + +func (m *AddrBookRecord_CertifiedRecord) Reset() { *m = AddrBookRecord_CertifiedRecord{} } +func (m *AddrBookRecord_CertifiedRecord) String() string { return proto.CompactTextString(m) } +func (*AddrBookRecord_CertifiedRecord) ProtoMessage() {} +func (*AddrBookRecord_CertifiedRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_f96873690e08a98f, []int{0, 1} +} +func (m *AddrBookRecord_CertifiedRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AddrBookRecord_CertifiedRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AddrBookRecord_CertifiedRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AddrBookRecord_CertifiedRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddrBookRecord_CertifiedRecord.Merge(m, src) +} +func (m *AddrBookRecord_CertifiedRecord) XXX_Size() int { + return m.Size() +} +func (m *AddrBookRecord_CertifiedRecord) XXX_DiscardUnknown() { + xxx_messageInfo_AddrBookRecord_CertifiedRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_AddrBookRecord_CertifiedRecord proto.InternalMessageInfo + +func (m *AddrBookRecord_CertifiedRecord) GetSeq() uint64 { + if m != nil { + return m.Seq + } + return 0 +} + +func (m *AddrBookRecord_CertifiedRecord) GetRaw() []byte { + if m != nil { + return m.Raw + } + return nil +} + +func init() { + proto.RegisterType((*AddrBookRecord)(nil), "pstore.pb.AddrBookRecord") + proto.RegisterType((*AddrBookRecord_AddrEntry)(nil), "pstore.pb.AddrBookRecord.AddrEntry") + proto.RegisterType((*AddrBookRecord_CertifiedRecord)(nil), "pstore.pb.AddrBookRecord.CertifiedRecord") +} + +func init() { proto.RegisterFile("pstore.proto", fileDescriptor_f96873690e08a98f) } + +var fileDescriptor_f96873690e08a98f = []byte{ + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x28, 0x2e, 0xc9, + 0x2f, 0x4a, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0xf1, 0x92, 0xa4, 0x74, 0xd3, + 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, + 0x2a, 0x92, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0xe8, 0x54, 0xba, 0xcc, 0xc4, 0xc5, + 0xe7, 0x98, 0x92, 0x52, 0xe4, 0x94, 0x9f, 0x9f, 0x1d, 0x94, 0x9a, 0x9c, 0x5f, 0x94, 0x22, 0x24, + 0xcf, 0xc5, 0x94, 0x99, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xe3, 0xc4, 0x7f, 0xeb, 0x9e, 0x3c, + 0x77, 0x00, 0x48, 0x65, 0x40, 0x6a, 0x6a, 0x91, 0xa7, 0x4b, 0x10, 0x53, 0x66, 0x8a, 0x90, 0x25, + 0x17, 0x6b, 0x62, 0x4a, 0x4a, 0x51, 0xb1, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0xb2, 0x1e, + 0xdc, 0x76, 0x3d, 0x54, 0xa3, 0xc0, 0x5c, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x88, 0x0e, 0xa1, + 0x10, 0x2e, 0x81, 0xe4, 0xd4, 0xa2, 0x92, 0xcc, 0xb4, 0xcc, 0xd4, 0x94, 0xf8, 0x22, 0xb0, 0x22, + 0x09, 0x66, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x4d, 0xdc, 0xa6, 0x38, 0xc3, 0x74, 0x40, 0xf8, 0x41, + 0xfc, 0xc9, 0xa8, 0x02, 0x52, 0x11, 0x5c, 0x9c, 0x70, 0x9b, 0x84, 0x14, 0xb9, 0x58, 0x40, 0x76, + 0x41, 0x3d, 0xc0, 0x7b, 0xeb, 0x9e, 0x3c, 0x27, 0xd8, 0x03, 0x20, 0x15, 0x41, 0x60, 0x29, 0x21, + 0x31, 0x2e, 0xb6, 0xd4, 0x8a, 0x82, 0xcc, 0xa2, 0x4a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xe6, 0x20, + 0x28, 0x4f, 0x48, 0x80, 0x8b, 0xb9, 0xa4, 0x24, 0x07, 0xec, 0x20, 0xe6, 0x20, 0x10, 0x53, 0xca, + 0x94, 0x8b, 0x1f, 0xcd, 0x76, 0x90, 0xa2, 0xe2, 0xd4, 
0x42, 0xb0, 0xf1, 0x2c, 0x41, 0x20, 0x26, + 0x48, 0xa4, 0x28, 0xb1, 0x1c, 0x6c, 0x16, 0x4f, 0x10, 0x88, 0xe9, 0xa4, 0xf0, 0xe3, 0xa1, 0x1c, + 0xe3, 0x81, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, + 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x90, 0xc4, + 0x06, 0x0e, 0x7e, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x29, 0xcd, 0xe8, 0xd4, 0xc8, 0x01, + 0x00, 0x00, +} + +func (m *AddrBookRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddrBookRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AddrBookRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CertifiedRecord != nil { + { + size, err := m.CertifiedRecord.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPstore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Addrs) > 0 { + for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addrs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPstore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Id != nil { + { + size := m.Id.Size() + i -= size + if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintPstore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AddrBookRecord_AddrEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddrBookRecord_AddrEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AddrBookRecord_AddrEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ttl != 0 { + i = encodeVarintPstore(dAtA, i, uint64(m.Ttl)) + i-- + dAtA[i] = 0x18 + } + if m.Expiry != 0 { + i = encodeVarintPstore(dAtA, i, uint64(m.Expiry)) + i-- + dAtA[i] = 0x10 + } + if m.Addr != nil { + { + size := m.Addr.Size() + i -= size + if _, err := m.Addr.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintPstore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AddrBookRecord_CertifiedRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddrBookRecord_CertifiedRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AddrBookRecord_CertifiedRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Raw) > 0 { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintPstore(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0x12 + } + if m.Seq != 0 { + i = encodeVarintPstore(dAtA, i, uint64(m.Seq)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPstore(dAtA []byte, offset int, v uint64) int { + offset -= sovPstore(v) + base := offset + for v >= 1<<7 { 
+ dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedAddrBookRecord(r randyPstore, easy bool) *AddrBookRecord { + this := &AddrBookRecord{} + this.Id = NewPopulatedProtoPeerID(r) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Addrs = make([]*AddrBookRecord_AddrEntry, v1) + for i := 0; i < v1; i++ { + this.Addrs[i] = NewPopulatedAddrBookRecord_AddrEntry(r, easy) + } + } + if r.Intn(5) != 0 { + this.CertifiedRecord = NewPopulatedAddrBookRecord_CertifiedRecord(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedAddrBookRecord_AddrEntry(r randyPstore, easy bool) *AddrBookRecord_AddrEntry { + this := &AddrBookRecord_AddrEntry{} + this.Addr = NewPopulatedProtoAddr(r) + this.Expiry = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Expiry *= -1 + } + this.Ttl = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Ttl *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedAddrBookRecord_CertifiedRecord(r randyPstore, easy bool) *AddrBookRecord_CertifiedRecord { + this := &AddrBookRecord_CertifiedRecord{} + this.Seq = uint64(uint64(r.Uint32())) + v2 := r.Intn(100) + this.Raw = make([]byte, v2) + for i := 0; i < v2; i++ { + this.Raw[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyPstore interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RunePstore(r randyPstore) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringPstore(r randyPstore) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RunePstore(r) + } + return string(tmps) +} +func randUnrecognizedPstore(r randyPstore, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldPstore(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldPstore(dAtA []byte, r randyPstore, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulatePstore(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulatePstore(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulatePstore(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulatePstore(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulatePstore(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulatePstore(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulatePstore(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *AddrBookRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != nil { + l = m.Id.Size() + n += 1 + l + sovPstore(uint64(l)) + } + if len(m.Addrs) > 0 { + for _, e := range m.Addrs { + l = e.Size() + n += 1 + 
l + sovPstore(uint64(l)) + } + } + if m.CertifiedRecord != nil { + l = m.CertifiedRecord.Size() + n += 1 + l + sovPstore(uint64(l)) + } + return n +} + +func (m *AddrBookRecord_AddrEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Addr != nil { + l = m.Addr.Size() + n += 1 + l + sovPstore(uint64(l)) + } + if m.Expiry != 0 { + n += 1 + sovPstore(uint64(m.Expiry)) + } + if m.Ttl != 0 { + n += 1 + sovPstore(uint64(m.Ttl)) + } + return n +} + +func (m *AddrBookRecord_CertifiedRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seq != 0 { + n += 1 + sovPstore(uint64(m.Seq)) + } + l = len(m.Raw) + if l > 0 { + n += 1 + l + sovPstore(uint64(l)) + } + return n +} + +func sovPstore(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPstore(x uint64) (n int) { + return sovPstore(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *AddrBookRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddrBookRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddrBookRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPstore + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPstore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var v ProtoPeerID + m.Id = &v + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPstore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPstore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs, &AddrBookRecord_AddrEntry{}) + if err := m.Addrs[len(m.Addrs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertifiedRecord", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPstore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPstore + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CertifiedRecord == nil { + m.CertifiedRecord = &AddrBookRecord_CertifiedRecord{} + } + if err := m.CertifiedRecord.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPstore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPstore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPstore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddrBookRecord_AddrEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddrEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddrEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPstore + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPstore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var v ProtoAddr + m.Addr = &v + if err := m.Addr.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Expiry", wireType) + } + m.Expiry = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Expiry |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ttl", wireType) + } + m.Ttl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Ttl |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPstore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPstore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPstore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddrBookRecord_CertifiedRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertifiedRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertifiedRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seq", wireType) + } + m.Seq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPstore + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPstore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPstore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPstore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPstore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPstore(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPstore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPstore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPstore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPstore + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPstore + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPstore + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPstore = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPstore = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPstore = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pb/pstore.proto b/vendor/github.com/libp2p/go-libp2p-peerstore/pb/pstore.proto new file mode 100644 index 000000000..0d1abd1da --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pb/pstore.proto 
@@ -0,0 +1,40 @@ +syntax = "proto3"; +package pstore.pb; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.benchgen_all) = true; +option (gogoproto.populate_all) = true; + +// AddrBookRecord represents a record for a peer in the address book. +message AddrBookRecord { + // The peer ID. + bytes id = 1 [(gogoproto.customtype) = "ProtoPeerID"]; + + // The multiaddresses. This is a sorted list where element 0 expires the soonest. + repeated AddrEntry addrs = 2; + + // The most recently received signed PeerRecord. + CertifiedRecord certified_record = 3; + + // AddrEntry represents a single multiaddress. + message AddrEntry { + bytes addr = 1 [(gogoproto.customtype) = "ProtoAddr"]; + + // The point in time when this address expires. + int64 expiry = 2; + + // The original TTL of this address. + int64 ttl = 3; + } + + // CertifiedRecord contains a serialized signed PeerRecord used to + // populate the signedAddrs list. + message CertifiedRecord { + // The Seq counter from the signed PeerRecord envelope + uint64 seq = 1; + + // The serialized bytes of the SignedEnvelope containing the PeerRecord. + bytes raw = 2; + } +} diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book.go new file mode 100644 index 000000000..51a1cf0e7 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book.go @@ -0,0 +1,578 @@ +package pstoreds + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + "github.com/libp2p/go-libp2p-core/record" + + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + logging "github.com/ipfs/go-log" + + "github.com/libp2p/go-libp2p-core/peer" + pstore "github.com/libp2p/go-libp2p-core/peerstore" + pb "github.com/libp2p/go-libp2p-peerstore/pb" + "github.com/libp2p/go-libp2p-peerstore/pstoremem" + + lru "github.com/hashicorp/golang-lru" + b32 "github.com/multiformats/go-base32" + ma "github.com/multiformats/go-multiaddr" +) + +type ttlWriteMode int + +const ( + ttlOverride ttlWriteMode = iota + ttlExtend +) + +var ( + log = logging.Logger("peerstore/ds") + + // Peer addresses are stored db key pattern: + // /peers/addrs/ + addrBookBase = ds.NewKey("/peers/addrs") +) + +// addrsRecord decorates the AddrBookRecord with locks and metadata. +type addrsRecord struct { + sync.RWMutex + *pb.AddrBookRecord + dirty bool +} + +// flush writes the record to the datastore by calling ds.Put, unless the record is +// marked for deletion, in which case we call ds.Delete. To be called within a lock. +func (r *addrsRecord) flush(write ds.Write) (err error) { + key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(r.Id.ID))) + + if len(r.Addrs) == 0 { + if err = write.Delete(key); err == nil { + r.dirty = false + } + return err + } + + data, err := r.Marshal() + if err != nil { + return err + } + if err = write.Put(key, data); err != nil { + return err + } + // write succeeded; record is no longer dirty. + r.dirty = false + return nil +} + +// clean is called on records to perform housekeeping. The return value indicates if the record was changed +// as a result of this call. +// +// clean does the following: +// * sorts addresses by expiration (soonest expiring first). +// * removes expired addresses. +// +// It short-circuits optimistically when there's nothing to do. +// +// clean is called from several points: +// * when accessing an entry. +// * when performing periodic GC. 
+// * after an entry has been modified (e.g. addresses have been added or removed, TTLs updated, etc.) +// +// If the return value is true, the caller should perform a flush immediately to sync the record with the store. +func (r *addrsRecord) clean() (chgd bool) { + now := time.Now().Unix() + addrsLen := len(r.Addrs) + + if !r.dirty && !r.hasExpiredAddrs(now) { + // record is not dirty, and we have no expired entries to purge. + return false + } + + if addrsLen == 0 { + // this is a ghost record; let's signal it has to be written. + // flush() will take care of doing the deletion. + return true + } + + if r.dirty && addrsLen > 1 { + sort.Slice(r.Addrs, func(i, j int) bool { + return r.Addrs[i].Expiry < r.Addrs[j].Expiry + }) + } + + r.Addrs = removeExpired(r.Addrs, now) + + return r.dirty || len(r.Addrs) != addrsLen +} + +func (r *addrsRecord) hasExpiredAddrs(now int64) bool { + if len(r.Addrs) > 0 && r.Addrs[0].Expiry <= now { + return true + } + return false +} + +func removeExpired(entries []*pb.AddrBookRecord_AddrEntry, now int64) []*pb.AddrBookRecord_AddrEntry { + // since addresses are sorted by expiration, we find the first + // survivor and split the slice on its index. + pivot := -1 + for i, addr := range entries { + if addr.Expiry > now { + break + } + pivot = i + } + + return entries[pivot+1:] +} + +// dsAddrBook is an address book backed by a Datastore with a GC procedure to purge expired entries. It uses an +// in-memory address stream manager. See the NewAddrBook for more information. +type dsAddrBook struct { + ctx context.Context + opts Options + + cache cache + ds ds.Batching + gc *dsAddrBookGc + subsManager *pstoremem.AddrSubManager + + // controls children goroutine lifetime. + childrenDone sync.WaitGroup + cancelFn func() +} + +var _ pstore.AddrBook = (*dsAddrBook)(nil) +var _ pstore.CertifiedAddrBook = (*dsAddrBook)(nil) + +// NewAddrBook initializes a new datastore-backed address book. It serves as a drop-in replacement for pstoremem +// (memory-backed peerstore), and works with any datastore implementing the ds.Batching interface. +// +// Addresses and peer records are serialized into protobuf, storing one datastore entry per peer, along with metadata +// to control address expiration. To alleviate disk access and serde overhead, we internally use a read/write-through +// ARC cache, the size of which is adjustable via Options.CacheSize. +// +// The user has a choice of two GC algorithms: +// +// - lookahead GC: minimises the amount of full store traversals by maintaining a time-indexed list of entries that +// need to be visited within the period specified in Options.GCLookaheadInterval. This is useful in scenarios with +// considerable TTL variance, coupled with datastores whose native iterators return entries in lexicographical key +// order. Enable this mode by passing a value Options.GCLookaheadInterval > 0. Lookahead windows are jumpy, not +// sliding. Purges operate exclusively over the lookahead window with periodicity Options.GCPurgeInterval. +// +// - full-purge GC (default): performs a full visit of the store with periodicity Options.GCPurgeInterval. Useful when +// the range of possible TTL values is small and the values themselves are also extreme, e.g. 10 minutes or +// permanent, popular values used in other libp2p modules. In this cited case, optimizing with lookahead windows +// makes little sense. 
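+//
+// A minimal usage sketch (DefaultOpts and the ds.Batching store passed in are
+// assumed to come from this package's peerstore.go and from the caller
+// respectively; the values shown are illustrative, not requirements):
+//
+//	opts := DefaultOpts()
+//	opts.CacheSize = 1024              // read/write-through ARC cache entries
+//	opts.GCPurgeInterval = 2 * time.Hour
+//	opts.GCLookaheadInterval = 0       // 0 keeps the default full-purge GC
+//	ab, err := NewAddrBook(ctx, store, opts)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer ab.Close()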
+func NewAddrBook(ctx context.Context, store ds.Batching, opts Options) (ab *dsAddrBook, err error) { + ctx, cancelFn := context.WithCancel(ctx) + ab = &dsAddrBook{ + ctx: ctx, + ds: store, + opts: opts, + cancelFn: cancelFn, + subsManager: pstoremem.NewAddrSubManager(), + } + + if opts.CacheSize > 0 { + if ab.cache, err = lru.NewARC(int(opts.CacheSize)); err != nil { + return nil, err + } + } else { + ab.cache = new(noopCache) + } + + if ab.gc, err = newAddressBookGc(ctx, ab); err != nil { + return nil, err + } + + return ab, nil +} + +func (ab *dsAddrBook) Close() error { + ab.cancelFn() + ab.childrenDone.Wait() + return nil +} + +// loadRecord is a read-through fetch. It fetches a record from cache, falling back to the +// datastore upon a miss, and returning a newly initialized record if the peer doesn't exist. +// +// loadRecord calls clean() on an existing record before returning it. If the record changes +// as a result and the update argument is true, the resulting state is saved in the datastore. +// +// If the cache argument is true, the record is inserted in the cache when loaded from the datastore. +func (ab *dsAddrBook) loadRecord(id peer.ID, cache bool, update bool) (pr *addrsRecord, err error) { + if err := id.Validate(); err != nil { + return nil, err + } + if e, ok := ab.cache.Get(id); ok { + pr = e.(*addrsRecord) + pr.Lock() + defer pr.Unlock() + + if pr.clean() && update { + err = pr.flush(ab.ds) + } + return pr, err + } + + pr = &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}} + key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(id))) + data, err := ab.ds.Get(key) + + switch err { + case ds.ErrNotFound: + err = nil + pr.Id = &pb.ProtoPeerID{ID: id} + case nil: + if err = pr.Unmarshal(data); err != nil { + return nil, err + } + // this record is new and local for now (not in cache), so we don't need to lock. + if pr.clean() && update { + err = pr.flush(ab.ds) + } + default: + return nil, err + } + + if cache { + ab.cache.Add(id, pr) + } + return pr, err +} + +// AddAddr will add a new address if it's not already in the AddrBook. +func (ab *dsAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) { + ab.AddAddrs(p, []ma.Multiaddr{addr}, ttl) +} + +// AddAddrs will add many new addresses if they're not already in the AddrBook. +func (ab *dsAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) { + if ttl <= 0 { + return + } + addrs = cleanAddrs(addrs) + ab.setAddrs(p, addrs, ttl, ttlExtend, false) +} + +// ConsumePeerRecord adds addresses from a signed peer.PeerRecord (contained in +// a record.Envelope), which will expire after the given TTL. +// See https://godoc.org/github.com/libp2p/go-libp2p-core/peerstore#CertifiedAddrBook for more details. 
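+//
+// An illustrative sketch of the expected call pattern (the payload bytes and
+// the TTL are assumptions; ConsumeEnvelope is the same helper used by
+// GetPeerRecord below):
+//
+//	envelope, _, err := record.ConsumeEnvelope(payload, peer.PeerRecordEnvelopeDomain)
+//	if err != nil {
+//		// handle error
+//	}
+//	accepted, err := ab.ConsumePeerRecord(envelope, time.Hour)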
+func (ab *dsAddrBook) ConsumePeerRecord(recordEnvelope *record.Envelope, ttl time.Duration) (bool, error) { + r, err := recordEnvelope.Record() + if err != nil { + return false, err + } + rec, ok := r.(*peer.PeerRecord) + if !ok { + return false, fmt.Errorf("envelope did not contain PeerRecord") + } + if !rec.PeerID.MatchesPublicKey(recordEnvelope.PublicKey) { + return false, fmt.Errorf("signing key does not match PeerID in PeerRecord") + } + + // ensure that the seq number from envelope is >= any previously received seq no + // update when equal to extend the ttls + if ab.latestPeerRecordSeq(rec.PeerID) > rec.Seq { + return false, nil + } + + addrs := cleanAddrs(rec.Addrs) + err = ab.setAddrs(rec.PeerID, addrs, ttl, ttlExtend, true) + if err != nil { + return false, err + } + + err = ab.storeSignedPeerRecord(rec.PeerID, recordEnvelope, rec) + if err != nil { + return false, err + } + return true, nil +} + +func (ab *dsAddrBook) latestPeerRecordSeq(p peer.ID) uint64 { + pr, err := ab.loadRecord(p, true, false) + if err != nil || len(pr.Addrs) == 0 || pr.CertifiedRecord == nil || len(pr.CertifiedRecord.Raw) == 0 { + return 0 + } + return pr.CertifiedRecord.Seq +} + +func (ab *dsAddrBook) storeSignedPeerRecord(p peer.ID, envelope *record.Envelope, rec *peer.PeerRecord) error { + envelopeBytes, err := envelope.Marshal() + if err != nil { + return err + } + // reload record and add routing state + // this has to be done after we add the addresses, since if + // we try to flush a datastore record with no addresses, + // it will just get deleted + pr, err := ab.loadRecord(p, true, false) + if err != nil { + return err + } + pr.Lock() + defer pr.Unlock() + pr.CertifiedRecord = &pb.AddrBookRecord_CertifiedRecord{ + Seq: rec.Seq, + Raw: envelopeBytes, + } + pr.dirty = true + err = pr.flush(ab.ds) + return err +} + +// GetPeerRecord returns a record.Envelope containing a peer.PeerRecord for the +// given peer id, if one exists. +// Returns nil if no signed PeerRecord exists for the peer. +func (ab *dsAddrBook) GetPeerRecord(p peer.ID) *record.Envelope { + pr, err := ab.loadRecord(p, true, false) + if err != nil { + log.Errorf("unable to load record for peer %s: %v", p.Pretty(), err) + return nil + } + pr.RLock() + defer pr.RUnlock() + if pr.CertifiedRecord == nil || len(pr.CertifiedRecord.Raw) == 0 || len(pr.Addrs) == 0 { + return nil + } + state, _, err := record.ConsumeEnvelope(pr.CertifiedRecord.Raw, peer.PeerRecordEnvelopeDomain) + if err != nil { + log.Errorf("error unmarshaling stored signed peer record for peer %s: %v", p.Pretty(), err) + return nil + } + return state +} + +// SetAddr will add or update the TTL of an address in the AddrBook. +func (ab *dsAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) { + ab.SetAddrs(p, []ma.Multiaddr{addr}, ttl) +} + +// SetAddrs will add or update the TTLs of addresses in the AddrBook. +func (ab *dsAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) { + addrs = cleanAddrs(addrs) + if ttl <= 0 { + ab.deleteAddrs(p, addrs) + return + } + ab.setAddrs(p, addrs, ttl, ttlOverride, false) +} + +// UpdateAddrs will update any addresses for a given peer and TTL combination to +// have a new TTL. 
+func (ab *dsAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
+	pr, err := ab.loadRecord(p, true, false)
+	if err != nil {
+		log.Errorf("failed to update ttls for peer %s: %s\n", p.Pretty(), err)
+		return
+	}
+
+	pr.Lock()
+	defer pr.Unlock()
+
+	newExp := time.Now().Add(newTTL).Unix()
+	for _, entry := range pr.Addrs {
+		if entry.Ttl != int64(oldTTL) {
+			continue
+		}
+		entry.Ttl, entry.Expiry = int64(newTTL), newExp
+		pr.dirty = true
+	}
+
+	if pr.clean() {
+		pr.flush(ab.ds)
+	}
+}
+
+// Addrs returns all of the non-expired addresses for a given peer.
+func (ab *dsAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
+	pr, err := ab.loadRecord(p, true, true)
+	if err != nil {
+		log.Warnf("failed to load peerstore entry for peer %v while querying addrs, err: %v", p, err)
+		return nil
+	}
+
+	pr.RLock()
+	defer pr.RUnlock()
+
+	addrs := make([]ma.Multiaddr, len(pr.Addrs))
+	for i, a := range pr.Addrs {
+		addrs[i] = a.Addr
+	}
+	return addrs
+}
+
+// PeersWithAddrs returns all of the peer IDs for which the AddrBook has addresses.
+func (ab *dsAddrBook) PeersWithAddrs() peer.IDSlice {
+	ids, err := uniquePeerIds(ab.ds, addrBookBase, func(result query.Result) string {
+		return ds.RawKey(result.Key).Name()
+	})
+	if err != nil {
+		log.Errorf("error while retrieving peers with addresses: %v", err)
+	}
+	return ids
+}
+
+// AddrStream returns a channel on which all new addresses discovered for a
+// given peer ID will be published.
+func (ab *dsAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
+	initial := ab.Addrs(p)
+	return ab.subsManager.AddrStream(ctx, p, initial)
+}
+
+// ClearAddrs will delete all known addresses for a peer ID.
+func (ab *dsAddrBook) ClearAddrs(p peer.ID) {
+	if err := p.Validate(); err != nil {
+		// nothing to do
+		return
+	}
+
+	ab.cache.Remove(p)
+
+	key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(p)))
+	if err := ab.ds.Delete(key); err != nil {
+		log.Errorf("failed to clear addresses for peer %s: %v", p.Pretty(), err)
+	}
+}
+
+func (ab *dsAddrBook) setAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, mode ttlWriteMode, signed bool) (err error) {
+	pr, err := ab.loadRecord(p, true, false)
+	if err != nil {
+		return fmt.Errorf("failed to load peerstore entry for peer %v while setting addrs, err: %v", p, err)
+	}
+
+	pr.Lock()
+	defer pr.Unlock()
+
+	// // if we have a signed PeerRecord, ignore attempts to add unsigned addrs
+	// if !signed && pr.CertifiedRecord != nil {
+	// 	return nil
+	// }
+
+	newExp := time.Now().Add(ttl).Unix()
+	// TODO this is very inefficient O(m*n); we could build a map to use as an
+	// index, and test against it. That would turn it into O(m+n). This code
+	// will be refactored entirely anyway, and it's not being used by users
+	// (that we know of); so OK to keep it for now.
+ updateExisting := func(entryList []*pb.AddrBookRecord_AddrEntry, incoming ma.Multiaddr) *pb.AddrBookRecord_AddrEntry { + for _, have := range entryList { + if incoming.Equal(have.Addr) { + switch mode { + case ttlOverride: + have.Ttl = int64(ttl) + have.Expiry = newExp + case ttlExtend: + if int64(ttl) > have.Ttl { + have.Ttl = int64(ttl) + } + if newExp > have.Expiry { + have.Expiry = newExp + } + default: + panic("BUG: unimplemented ttl mode") + } + return have + } + } + return nil + } + + var entries []*pb.AddrBookRecord_AddrEntry + for _, incoming := range addrs { + existingEntry := updateExisting(pr.Addrs, incoming) + + if existingEntry == nil { + // if signed { + // entries = append(entries, existingEntry) + // } + // } else { + // new addr, add & broadcast + entry := &pb.AddrBookRecord_AddrEntry{ + Addr: &pb.ProtoAddr{Multiaddr: incoming}, + Ttl: int64(ttl), + Expiry: newExp, + } + entries = append(entries, entry) + + // note: there's a minor chance that writing the record will fail, in which case we would've broadcast + // the addresses without persisting them. This is very unlikely and not much of an issue. + ab.subsManager.BroadcastAddr(p, incoming) + } + } + + // if signed { + // // when adding signed addrs, we want to keep _only_ the incoming addrs + // pr.Addrs = entries + // } else { + pr.Addrs = append(pr.Addrs, entries...) + // } + + pr.dirty = true + pr.clean() + return pr.flush(ab.ds) +} + +// deletes addresses in place, avoiding copies until we encounter the first deletion. +// does not preserve order, but entries are re-sorted before flushing to disk anyway. +func deleteInPlace(s []*pb.AddrBookRecord_AddrEntry, addrs []ma.Multiaddr) []*pb.AddrBookRecord_AddrEntry { + if s == nil || len(addrs) == 0 { + return s + } + survived := len(s) +Outer: + for i, addr := range s { + for _, del := range addrs { + if !addr.Addr.Equal(del) { + continue + } + survived-- + // if there are no survivors, bail out + if survived == 0 { + break Outer + } + s[i] = s[survived] + // we've already dealt with s[i], move to the next + continue Outer + } + } + return s[:survived] +} + +func (ab *dsAddrBook) deleteAddrs(p peer.ID, addrs []ma.Multiaddr) (err error) { + pr, err := ab.loadRecord(p, false, false) + if err != nil { + return fmt.Errorf("failed to load peerstore entry for peer %v while deleting addrs, err: %v", p, err) + } + + if pr.Addrs == nil { + return nil + } + + pr.Lock() + defer pr.Unlock() + + pr.Addrs = deleteInPlace(pr.Addrs, addrs) + + pr.dirty = true + pr.clean() + return pr.flush(ab.ds) +} + +func cleanAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { + clean := make([]ma.Multiaddr, 0, len(addrs)) + for _, addr := range addrs { + if addr == nil { + continue + } + clean = append(clean, addr) + } + return clean +} diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book_gc.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book_gc.go new file mode 100644 index 000000000..808a5aa39 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book_gc.go @@ -0,0 +1,388 @@ +package pstoreds + +import ( + "context" + "fmt" + "strconv" + "time" + + ds "github.com/ipfs/go-datastore" + query "github.com/ipfs/go-datastore/query" + + peer "github.com/libp2p/go-libp2p-core/peer" + pb "github.com/libp2p/go-libp2p-peerstore/pb" + + b32 "github.com/multiformats/go-base32" +) + +var ( + // GC lookahead entries are stored in key pattern: + // /peers/gc/addrs// => nil + // in databases with lexicographical key order, this time-indexing allows us to 
visit + // only the timeslice we are interested in. + gcLookaheadBase = ds.NewKey("/peers/gc/addrs") + + // queries + purgeLookaheadQuery = query.Query{ + Prefix: gcLookaheadBase.String(), + Orders: []query.Order{query.OrderByKey{}}, + KeysOnly: true, + } + + purgeStoreQuery = query.Query{ + Prefix: addrBookBase.String(), + Orders: []query.Order{query.OrderByKey{}}, + KeysOnly: false, + } + + populateLookaheadQuery = query.Query{ + Prefix: addrBookBase.String(), + Orders: []query.Order{query.OrderByKey{}}, + KeysOnly: true, + } +) + +// dsAddrBookGc is responsible for garbage collection in a datastore-backed address book. +type dsAddrBookGc struct { + ctx context.Context + ab *dsAddrBook + running chan struct{} + lookaheadEnabled bool + purgeFunc func() + currWindowEnd int64 +} + +func newAddressBookGc(ctx context.Context, ab *dsAddrBook) (*dsAddrBookGc, error) { + if ab.opts.GCPurgeInterval < 0 { + return nil, fmt.Errorf("negative GC purge interval provided: %s", ab.opts.GCPurgeInterval) + } + if ab.opts.GCLookaheadInterval < 0 { + return nil, fmt.Errorf("negative GC lookahead interval provided: %s", ab.opts.GCLookaheadInterval) + } + if ab.opts.GCInitialDelay < 0 { + return nil, fmt.Errorf("negative GC initial delay provided: %s", ab.opts.GCInitialDelay) + } + if ab.opts.GCLookaheadInterval > 0 && ab.opts.GCLookaheadInterval < ab.opts.GCPurgeInterval { + return nil, fmt.Errorf("lookahead interval must be larger than purge interval, respectively: %s, %s", + ab.opts.GCLookaheadInterval, ab.opts.GCPurgeInterval) + } + + lookaheadEnabled := ab.opts.GCLookaheadInterval > 0 + gc := &dsAddrBookGc{ + ctx: ctx, + ab: ab, + running: make(chan struct{}, 1), + lookaheadEnabled: lookaheadEnabled, + } + + if lookaheadEnabled { + gc.purgeFunc = gc.purgeLookahead + } else { + gc.purgeFunc = gc.purgeStore + } + + // do not start GC timers if purge is disabled; this GC can only be triggered manually. + if ab.opts.GCPurgeInterval > 0 { + gc.ab.childrenDone.Add(1) + go gc.background() + } + + return gc, nil +} + +// gc prunes expired addresses from the datastore at regular intervals. It should be spawned as a goroutine. +func (gc *dsAddrBookGc) background() { + defer gc.ab.childrenDone.Done() + + select { + case <-time.After(gc.ab.opts.GCInitialDelay): + case <-gc.ab.ctx.Done(): + // yield if we have been cancelled/closed before the delay elapses. + return + } + + purgeTimer := time.NewTicker(gc.ab.opts.GCPurgeInterval) + defer purgeTimer.Stop() + + var lookaheadCh <-chan time.Time + if gc.lookaheadEnabled { + lookaheadTimer := time.NewTicker(gc.ab.opts.GCLookaheadInterval) + lookaheadCh = lookaheadTimer.C + gc.populateLookahead() // do a lookahead now + defer lookaheadTimer.Stop() + } + + for { + select { + case <-purgeTimer.C: + gc.purgeFunc() + + case <-lookaheadCh: + // will never trigger if lookahead is disabled (nil Duration). + gc.populateLookahead() + + case <-gc.ctx.Done(): + return + } + } +} + +// purgeCycle runs a single GC purge cycle. It operates within the lookahead window if lookahead is enabled; else it +// visits all entries in the datastore, deleting the addresses that have expired. +func (gc *dsAddrBookGc) purgeLookahead() { + select { + case gc.running <- struct{}{}: + defer func() { <-gc.running }() + default: + // yield if lookahead is running. + return + } + + var id peer.ID + record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}} // empty record to reuse and avoid allocs. 
+	batch, err := newCyclicBatch(gc.ab.ds, defaultOpsPerCyclicBatch)
+	if err != nil {
+		log.Warnf("failed while creating batch to purge GC entries: %v", err)
+	}
+
+	// This function drops an unparseable GC entry; this is for safety. It is an escape hatch in case
+	// we modify the format of keys going forward. If a user runs a new version against an old DB,
+	// if we don't clean up unparseable entries we'll end up accumulating garbage.
+	dropInError := func(key ds.Key, err error, msg string) {
+		if err != nil {
+			log.Warnf("failed while %s record with GC key: %v, err: %v; deleting", msg, key, err)
+		}
+		if err = batch.Delete(key); err != nil {
+			log.Warnf("failed to delete corrupt GC lookahead entry: %v, err: %v", key, err)
+		}
+	}
+
+	// This function drops a GC key if the entry is cleaned correctly. It may reschedule another visit
+	// if the next earliest expiry falls within the current window again.
+	dropOrReschedule := func(key ds.Key, ar *addrsRecord) {
+		if err := batch.Delete(key); err != nil {
+			log.Warnf("failed to delete lookahead entry: %v, err: %v", key, err)
+		}
+
+		// re-add the record if it needs to be visited again in this window.
+		if len(ar.Addrs) != 0 && ar.Addrs[0].Expiry <= gc.currWindowEnd {
+			gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", ar.Addrs[0].Expiry, key.Name()))
+			if err := batch.Put(gcKey, []byte{}); err != nil {
+				log.Warnf("failed to add new GC key: %v, err: %v", gcKey, err)
+			}
+		}
+	}
+
+	results, err := gc.ab.ds.Query(purgeLookaheadQuery)
+	if err != nil {
+		log.Warnf("failed while fetching entries to purge: %v", err)
+		return
+	}
+	defer results.Close()
+
+	now := time.Now().Unix()
+
+	// keys:   /peers/gc/addrs/<unix timestamp of next visit>/<peer ID b32>
+	// values: nil
+	for result := range results.Next() {
+		gcKey := ds.RawKey(result.Key)
+		ts, err := strconv.ParseInt(gcKey.Parent().Name(), 10, 64)
+		if err != nil {
+			dropInError(gcKey, err, "parsing timestamp")
+			log.Warnf("failed while parsing timestamp from key: %v, err: %v", result.Key, err)
+			continue
+		} else if ts > now {
+			// this is an ordered cursor; when we hit an entry with a timestamp beyond now, we can break.
+			break
+		}
+
+		idb32, err := b32.RawStdEncoding.DecodeString(gcKey.Name())
+		if err != nil {
+			dropInError(gcKey, err, "parsing peer ID")
+			log.Warnf("failed while parsing b32 peer ID from key: %v, err: %v", result.Key, err)
+			continue
+		}
+
+		id, err = peer.IDFromBytes(idb32)
+		if err != nil {
+			dropInError(gcKey, err, "decoding peer ID")
+			log.Warnf("failed while decoding peer ID from key: %v, err: %v", result.Key, err)
+			continue
+		}
+
+		// if the record is in cache, we clean it and flush it if necessary.
+		if e, ok := gc.ab.cache.Peek(id); ok {
+			cached := e.(*addrsRecord)
+			cached.Lock()
+			if cached.clean() {
+				if err = cached.flush(batch); err != nil {
+					log.Warnf("failed to flush entry modified by GC for peer: %v, err: %v", id.Pretty(), err)
+				}
+			}
+			dropOrReschedule(gcKey, cached)
+			cached.Unlock()
+			continue
+		}
+
+		record.Reset()
+
+		// otherwise, fetch it from the store, clean it and flush it.
+		entryKey := addrBookBase.ChildString(gcKey.Name())
+		val, err := gc.ab.ds.Get(entryKey)
+		if err != nil {
+			// captures all errors, including ErrNotFound.
+			dropInError(gcKey, err, "fetching entry")
+			continue
+		}
+		err = record.Unmarshal(val)
+		if err != nil {
+			dropInError(gcKey, err, "unmarshalling entry")
+			continue
+		}
+		if record.clean() {
+			err = record.flush(batch)
+			if err != nil {
+				log.Warnf("failed to flush entry modified by GC for peer: %v, err: %v", id.Pretty(), err)
+			}
+		}
+		dropOrReschedule(gcKey, record)
+	}
+
+	if err = batch.Commit(); err != nil {
+		log.Warnf("failed to commit GC purge batch: %v", err)
+	}
+}
+
+func (gc *dsAddrBookGc) purgeStore() {
+	select {
+	case gc.running <- struct{}{}:
+		defer func() { <-gc.running }()
+	default:
+		// yield if lookahead is running.
+		return
+	}
+
+	record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}} // empty record to reuse and avoid allocs.
+	batch, err := newCyclicBatch(gc.ab.ds, defaultOpsPerCyclicBatch)
+	if err != nil {
+		log.Warnf("failed while creating batch to purge GC entries: %v", err)
+	}
+
+	results, err := gc.ab.ds.Query(purgeStoreQuery)
+	if err != nil {
+		log.Warnf("failed while opening iterator: %v", err)
+		return
+	}
+	defer results.Close()
+
+	// keys: /peers/addrs/<peer ID b32>
+	for result := range results.Next() {
+		record.Reset()
+		if err = record.Unmarshal(result.Value); err != nil {
+			// TODO log
+			continue
+		}
+
+		id := record.Id.ID
+		if !record.clean() {
+			continue
+		}
+
+		if err := record.flush(batch); err != nil {
+			log.Warnf("failed to flush entry modified by GC for peer: %v, err: %v", id, err)
+		}
+		gc.ab.cache.Remove(id)
+	}
+
+	if err = batch.Commit(); err != nil {
+		log.Warnf("failed to commit GC purge batch: %v", err)
+	}
+}
+
+// populateLookahead populates the lookahead window by scanning the entire store and picking entries whose earliest
+// expiration falls within the window period.
+//
+// Those entries are stored in the lookahead region in the store, indexed by the timestamp when they need to be
+// visited, to facilitate temporal range scans.
+func (gc *dsAddrBookGc) populateLookahead() {
+	if gc.ab.opts.GCLookaheadInterval == 0 {
+		return
+	}
+
+	select {
+	case gc.running <- struct{}{}:
+		defer func() { <-gc.running }()
+	default:
+		// yield if something's running.
+		return
+	}
+
+	until := time.Now().Add(gc.ab.opts.GCLookaheadInterval).Unix()
+
+	var id peer.ID
+	record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}}
+	results, err := gc.ab.ds.Query(populateLookaheadQuery)
+	if err != nil {
+		log.Warnf("failed while querying to populate lookahead GC window: %v", err)
+		return
+	}
+	defer results.Close()
+
+	batch, err := newCyclicBatch(gc.ab.ds, defaultOpsPerCyclicBatch)
+	if err != nil {
+		log.Warnf("failed while creating batch to populate lookahead GC window: %v", err)
+		return
+	}
+
+	for result := range results.Next() {
+		idb32 := ds.RawKey(result.Key).Name()
+		k, err := b32.RawStdEncoding.DecodeString(idb32)
+		if err != nil {
+			log.Warnf("failed while decoding peer ID from key: %v, err: %v", result.Key, err)
+			continue
+		}
+		if id, err = peer.IDFromBytes(k); err != nil {
+			log.Warnf("failed while decoding peer ID from key: %v, err: %v", result.Key, err)
+		}
+
+		// if the record is in cache, use the cached version.
+		if e, ok := gc.ab.cache.Peek(id); ok {
+			cached := e.(*addrsRecord)
+			cached.RLock()
+			if len(cached.Addrs) == 0 || cached.Addrs[0].Expiry > until {
+				cached.RUnlock()
+				continue
+			}
+			gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", cached.Addrs[0].Expiry, idb32))
+			if err = batch.Put(gcKey, []byte{}); err != nil {
+				log.Warnf("failed while inserting GC entry for peer: %v, err: %v", id.Pretty(), err)
+			}
+			cached.RUnlock()
+			continue
+		}
+
+		record.Reset()
+
+		val, err := gc.ab.ds.Get(ds.RawKey(result.Key))
+		if err != nil {
+			log.Warnf("failed while getting record from store for peer: %v, err: %v", id.Pretty(), err)
+			continue
+		}
+		if err := record.Unmarshal(val); err != nil {
+			log.Warnf("failed while unmarshalling record from store for peer: %v, err: %v", id.Pretty(), err)
+			continue
+		}
+		if len(record.Addrs) > 0 && record.Addrs[0].Expiry <= until {
+			gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", record.Addrs[0].Expiry, idb32))
+			if err = batch.Put(gcKey, []byte{}); err != nil {
+				log.Warnf("failed while inserting GC entry for peer: %v, err: %v", id.Pretty(), err)
+			}
+		}
+	}
+
+	if err = batch.Commit(); err != nil {
+		log.Warnf("failed to commit GC lookahead batch: %v", err)
+	}
+
+	gc.currWindowEnd = until
+}
diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/cache.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/cache.go
new file mode 100644
index 000000000..d3d43aae1
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/cache.go
@@ -0,0 +1,40 @@
+package pstoreds
+
+// cache abstracts all methods we access from ARCCache, to enable alternate
+// implementations such as a no-op one.
+type cache interface {
+	Get(key interface{}) (value interface{}, ok bool)
+	Add(key, value interface{})
+	Remove(key interface{})
+	Contains(key interface{}) bool
+	Peek(key interface{}) (value interface{}, ok bool)
+	Keys() []interface{}
+}
+
+// noopCache is a dummy implementation that's used when the cache is disabled.
+type noopCache struct {
+}
+
+var _ cache = (*noopCache)(nil)
+
+func (*noopCache) Get(key interface{}) (value interface{}, ok bool) {
+	return nil, false
+}
+
+func (*noopCache) Add(key, value interface{}) {
+}
+
+func (*noopCache) Remove(key interface{}) {
+}
+
+func (*noopCache) Contains(key interface{}) bool {
+	return false
+}
+
+func (*noopCache) Peek(key interface{}) (value interface{}, ok bool) {
+	return nil, false
+}
+
+func (*noopCache) Keys() (keys []interface{}) {
+	return keys
+}
diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/cyclic_batch.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/cyclic_batch.go
new file mode 100644
index 000000000..1cf8579ca
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/cyclic_batch.go
@@ -0,0 +1,76 @@
+package pstoreds
+
+import (
+	"github.com/pkg/errors"
+
+	ds "github.com/ipfs/go-datastore"
+)
+
+// how many operations are queued in a cyclic batch before we flush it.
+var defaultOpsPerCyclicBatch = 20
+
+// cyclicBatch buffers ds write operations and automatically flushes them after defaultOpsPerCyclicBatch (20) have been
+// queued. An explicit `Commit()` closes this cyclic batch, erroring all further operations.
+//
+// It is similar to go-ds autobatch, but it's driven by an actual Batch facility offered by the
+// ds.
+type cyclicBatch struct {
+	threshold int
+	ds.Batch
+	ds      ds.Batching
+	pending int
+}
+
+func newCyclicBatch(ds ds.Batching, threshold int) (ds.Batch, error) {
+	batch, err := ds.Batch()
+	if err != nil {
+		return nil, err
+	}
+	return &cyclicBatch{threshold: threshold, Batch: batch, ds: ds}, nil
+}
+
+func (cb *cyclicBatch) cycle() (err error) {
+	if cb.Batch == nil {
+		return errors.New("cyclic batch is closed")
+	}
+	if cb.pending < cb.threshold {
+		// we haven't reached the threshold yet.
+		return nil
+	}
+	// commit and renew the batch.
+	if err = cb.Batch.Commit(); err != nil {
+		return errors.Wrap(err, "failed while committing cyclic batch")
+	}
+	if cb.Batch, err = cb.ds.Batch(); err != nil {
+		return errors.Wrap(err, "failed while renewing cyclic batch")
+	}
+	return nil
+}
+
+func (cb *cyclicBatch) Put(key ds.Key, val []byte) error {
+	if err := cb.cycle(); err != nil {
+		return err
+	}
+	cb.pending++
+	return cb.Batch.Put(key, val)
+}
+
+func (cb *cyclicBatch) Delete(key ds.Key) error {
+	if err := cb.cycle(); err != nil {
+		return err
+	}
+	cb.pending++
+	return cb.Batch.Delete(key)
+}
+
+func (cb *cyclicBatch) Commit() error {
+	if cb.Batch == nil {
+		return errors.New("cyclic batch is closed")
+	}
+	if err := cb.Batch.Commit(); err != nil {
+		return err
+	}
+	cb.pending = 0
+	cb.Batch = nil
+	return nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/keybook.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/keybook.go
new file mode 100644
index 000000000..35021b2d0
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/keybook.go
@@ -0,0 +1,134 @@
+package pstoreds
+
+import (
+	"context"
+	"errors"
+
+	base32 "github.com/multiformats/go-base32"
+
+	ds "github.com/ipfs/go-datastore"
+	query "github.com/ipfs/go-datastore/query"
+
+	ic "github.com/libp2p/go-libp2p-core/crypto"
+	peer "github.com/libp2p/go-libp2p-core/peer"
+	pstore "github.com/libp2p/go-libp2p-core/peerstore"
+)
+
+// Public and private keys are stored under the following db key pattern:
+// /peers/keys/<peer ID b32>/{pub, priv}
+var (
+	kbBase     = ds.NewKey("/peers/keys")
+	pubSuffix  = ds.NewKey("/pub")
+	privSuffix = ds.NewKey("/priv")
+)
+
+type dsKeyBook struct {
+	ds ds.Datastore
+}
+
+var _ pstore.KeyBook = (*dsKeyBook)(nil)
+
+func NewKeyBook(_ context.Context, store ds.Datastore, _ Options) (*dsKeyBook, error) {
+	return &dsKeyBook{store}, nil
+}
+
+func (kb *dsKeyBook) PubKey(p peer.ID) ic.PubKey {
+	key := kbBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).Child(pubSuffix)
+
+	var pk ic.PubKey
+	if value, err := kb.ds.Get(key); err == nil {
+		pk, err = ic.UnmarshalPublicKey(value)
+		if err != nil {
+			log.Errorf("error when unmarshalling pubkey from datastore for peer %s: %s\n", p.Pretty(), err)
+		}
+	} else if err == ds.ErrNotFound {
+		pk, err = p.ExtractPublicKey()
+		switch err {
+		case nil:
+		case peer.ErrNoPublicKey:
+			return nil
+		default:
+			log.Errorf("error when extracting pubkey from peer ID for peer %s: %s\n", p.Pretty(), err)
+			return nil
+		}
+		pkb, err := pk.Bytes()
+		if err != nil {
+			log.Errorf("error when turning extracted pubkey into bytes for peer %s: %s\n", p.Pretty(), err)
+			return nil
+		}
+		err = kb.ds.Put(key, pkb)
+		if err != nil {
+			log.Errorf("error when adding extracted pubkey to peerstore for peer %s: %s\n", p.Pretty(), err)
+			return nil
+		}
+	} else {
+		log.Errorf("error when fetching pubkey from datastore for peer %s: %s\n", p.Pretty(), err)
+	}
+
+	return pk
+}
+
+func (kb *dsKeyBook) AddPubKey(p peer.ID, pk ic.PubKey) error {
+	// check it's
correct. + if !p.MatchesPublicKey(pk) { + return errors.New("peer ID does not match public key") + } + + key := kbBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).Child(pubSuffix) + val, err := pk.Bytes() + if err != nil { + log.Errorf("error while converting pubkey byte string for peer %s: %s\n", p.Pretty(), err) + return err + } + err = kb.ds.Put(key, val) + if err != nil { + log.Errorf("error while updating pubkey in datastore for peer %s: %s\n", p.Pretty(), err) + } + return err +} + +func (kb *dsKeyBook) PrivKey(p peer.ID) ic.PrivKey { + key := kbBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).Child(privSuffix) + value, err := kb.ds.Get(key) + if err != nil { + log.Errorf("error while fetching privkey from datastore for peer %s: %s\n", p.Pretty(), err) + return nil + } + sk, err := ic.UnmarshalPrivateKey(value) + if err != nil { + return nil + } + return sk +} + +func (kb *dsKeyBook) AddPrivKey(p peer.ID, sk ic.PrivKey) error { + if sk == nil { + return errors.New("private key is nil") + } + // check it's correct. + if !p.MatchesPrivateKey(sk) { + return errors.New("peer ID does not match private key") + } + + key := kbBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).Child(privSuffix) + val, err := sk.Bytes() + if err != nil { + log.Errorf("error while converting privkey byte string for peer %s: %s\n", p.Pretty(), err) + return err + } + err = kb.ds.Put(key, val) + if err != nil { + log.Errorf("error while updating privkey in datastore for peer %s: %s\n", p.Pretty(), err) + } + return err +} + +func (kb *dsKeyBook) PeersWithKeys() peer.IDSlice { + ids, err := uniquePeerIds(kb.ds, kbBase, func(result query.Result) string { + return ds.RawKey(result.Key).Parent().Name() + }) + if err != nil { + log.Errorf("error while retrieving peers with keys: %v", err) + } + return ids +} diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/metadata.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/metadata.go new file mode 100644 index 000000000..bf7655231 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/metadata.go @@ -0,0 +1,73 @@ +package pstoreds + +import ( + "bytes" + "context" + "encoding/gob" + + base32 "github.com/multiformats/go-base32" + + ds "github.com/ipfs/go-datastore" + + pool "github.com/libp2p/go-buffer-pool" + peer "github.com/libp2p/go-libp2p-core/peer" + pstore "github.com/libp2p/go-libp2p-core/peerstore" +) + +// Metadata is stored under the following db key pattern: +// /peers/metadata// +var pmBase = ds.NewKey("/peers/metadata") + +type dsPeerMetadata struct { + ds ds.Datastore +} + +var _ pstore.PeerMetadata = (*dsPeerMetadata)(nil) + +func init() { + // Gob registers basic types by default. + // + // Register complex types used by the peerstore itself. + gob.Register(make(map[string]struct{})) +} + +// NewPeerMetadata creates a metadata store backed by a persistent db. It uses gob for serialisation. +// +// See `init()` to learn which types are registered by default. Modules wishing to store +// values of other types will need to `gob.Register()` them explicitly, or else callers +// will receive runtime errors. 
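To make the gob.Register requirement above concrete, a hypothetical caller-side sketch (not part of the patch); the dialStats type and the "dial-stats" key are made up for illustration:

package example

import (
	"encoding/gob"

	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/libp2p/go-libp2p-core/peerstore"
)

// dialStats is a hypothetical caller-defined type; it must be registered with
// gob before it can round-trip through the datastore-backed metadata store.
type dialStats struct {
	Attempts int
}

func init() {
	gob.Register(dialStats{})
}

// recordDialStats stores and reads back a custom value for a peer.
func recordDialStats(pm peerstore.PeerMetadata, p peer.ID) (dialStats, error) {
	if err := pm.Put(p, "dial-stats", dialStats{Attempts: 3}); err != nil {
		return dialStats{}, err
	}
	v, err := pm.Get(p, "dial-stats")
	if err != nil {
		return dialStats{}, err
	}
	return v.(dialStats), nil // safe only because the concrete type was registered above
}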
+func NewPeerMetadata(_ context.Context, store ds.Datastore, _ Options) (*dsPeerMetadata, error) { + return &dsPeerMetadata{store}, nil +} + +func (pm *dsPeerMetadata) Get(p peer.ID, key string) (interface{}, error) { + if err := p.Validate(); err != nil { + return nil, err + } + k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key) + value, err := pm.ds.Get(k) + if err != nil { + if err == ds.ErrNotFound { + err = pstore.ErrNotFound + } + return nil, err + } + + var res interface{} + if err := gob.NewDecoder(bytes.NewReader(value)).Decode(&res); err != nil { + return nil, err + } + return res, nil +} + +func (pm *dsPeerMetadata) Put(p peer.ID, key string, val interface{}) error { + if err := p.Validate(); err != nil { + return err + } + k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key) + var buf pool.Buffer + if err := gob.NewEncoder(&buf).Encode(&val); err != nil { + return err + } + return pm.ds.Put(k, buf.Bytes()) +} diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/peerstore.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/peerstore.go new file mode 100644 index 000000000..3c8aa0f84 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/peerstore.go @@ -0,0 +1,166 @@ +package pstoreds + +import ( + "context" + "fmt" + "io" + "time" + + base32 "github.com/multiformats/go-base32" + + ds "github.com/ipfs/go-datastore" + query "github.com/ipfs/go-datastore/query" + + peer "github.com/libp2p/go-libp2p-core/peer" + peerstore "github.com/libp2p/go-libp2p-core/peerstore" + pstore "github.com/libp2p/go-libp2p-peerstore" +) + +// Configuration object for the peerstore. +type Options struct { + // The size of the in-memory cache. A value of 0 or lower disables the cache. + CacheSize uint + + // Sweep interval to purge expired addresses from the datastore. If this is a zero value, GC will not run + // automatically, but it'll be available on demand via explicit calls. + GCPurgeInterval time.Duration + + // Interval to renew the GC lookahead window. If this is a zero value, lookahead will be disabled and we'll + // traverse the entire datastore for every purge cycle. + GCLookaheadInterval time.Duration + + // Initial delay before GC processes start. Intended to give the system breathing room to fully boot + // before starting GC. + GCInitialDelay time.Duration +} + +// DefaultOpts returns the default options for a persistent peerstore, with the full-purge GC algorithm: +// +// * Cache size: 1024. +// * GC purge interval: 2 hours. +// * GC lookahead interval: disabled. +// * GC initial delay: 60 seconds. +func DefaultOpts() Options { + return Options{ + CacheSize: 1024, + GCPurgeInterval: 2 * time.Hour, + GCLookaheadInterval: 0, + GCInitialDelay: 60 * time.Second, + } +} + +type pstoreds struct { + peerstore.Metrics + + dsKeyBook + dsAddrBook + dsProtoBook + dsPeerMetadata +} + +// NewPeerstore creates a peerstore backed by the provided persistent datastore. 
+func NewPeerstore(ctx context.Context, store ds.Batching, opts Options) (*pstoreds, error) { + addrBook, err := NewAddrBook(ctx, store, opts) + if err != nil { + return nil, err + } + + keyBook, err := NewKeyBook(ctx, store, opts) + if err != nil { + return nil, err + } + + peerMetadata, err := NewPeerMetadata(ctx, store, opts) + if err != nil { + return nil, err + } + + protoBook := NewProtoBook(peerMetadata) + + ps := &pstoreds{ + Metrics: pstore.NewMetrics(), + dsKeyBook: *keyBook, + dsAddrBook: *addrBook, + dsPeerMetadata: *peerMetadata, + dsProtoBook: *protoBook, + } + return ps, nil +} + +// uniquePeerIds extracts and returns unique peer IDs from database keys. +func uniquePeerIds(ds ds.Datastore, prefix ds.Key, extractor func(result query.Result) string) (peer.IDSlice, error) { + var ( + q = query.Query{Prefix: prefix.String(), KeysOnly: true} + results query.Results + err error + ) + + if results, err = ds.Query(q); err != nil { + log.Error(err) + return nil, err + } + + defer results.Close() + + idset := make(map[string]struct{}) + for result := range results.Next() { + k := extractor(result) + idset[k] = struct{}{} + } + + if len(idset) == 0 { + return peer.IDSlice{}, nil + } + + ids := make(peer.IDSlice, 0, len(idset)) + for id := range idset { + pid, _ := base32.RawStdEncoding.DecodeString(id) + id, _ := peer.IDFromBytes(pid) + ids = append(ids, id) + } + return ids, nil +} + +func (ps *pstoreds) Close() (err error) { + var errs []error + weakClose := func(name string, c interface{}) { + if cl, ok := c.(io.Closer); ok { + if err = cl.Close(); err != nil { + errs = append(errs, fmt.Errorf("%s error: %s", name, err)) + } + } + } + + weakClose("keybook", ps.dsKeyBook) + weakClose("addressbook", ps.dsAddrBook) + weakClose("protobook", ps.dsProtoBook) + weakClose("peermetadata", ps.dsPeerMetadata) + + if len(errs) > 0 { + return fmt.Errorf("failed while closing peerstore; err(s): %q", errs) + } + return nil +} + +func (ps *pstoreds) Peers() peer.IDSlice { + set := map[peer.ID]struct{}{} + for _, p := range ps.PeersWithKeys() { + set[p] = struct{}{} + } + for _, p := range ps.PeersWithAddrs() { + set[p] = struct{}{} + } + + pps := make(peer.IDSlice, 0, len(set)) + for p := range set { + pps = append(pps, p) + } + return pps +} + +func (ps *pstoreds) PeerInfo(p peer.ID) peer.AddrInfo { + return peer.AddrInfo{ + ID: p, + Addrs: ps.dsAddrBook.Addrs(p), + } +} diff --git a/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/protobook.go b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/protobook.go new file mode 100644 index 000000000..6518c4e75 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/protobook.go @@ -0,0 +1,188 @@ +package pstoreds + +import ( + "fmt" + "sync" + + peer "github.com/libp2p/go-libp2p-core/peer" + + pstore "github.com/libp2p/go-libp2p-core/peerstore" +) + +type protoSegment struct { + sync.RWMutex +} + +type protoSegments [256]*protoSegment + +func (s *protoSegments) get(p peer.ID) *protoSegment { + return s[byte(p[len(p)-1])] +} + +type dsProtoBook struct { + segments protoSegments + meta pstore.PeerMetadata +} + +var _ pstore.ProtoBook = (*dsProtoBook)(nil) + +func NewProtoBook(meta pstore.PeerMetadata) *dsProtoBook { + return &dsProtoBook{ + meta: meta, + segments: func() (ret protoSegments) { + for i := range ret { + ret[i] = &protoSegment{} + } + return ret + }(), + } +} + +func (pb *dsProtoBook) SetProtocols(p peer.ID, protos ...string) error { + if err := p.Validate(); err != nil { + return err + } + + s := 
pb.segments.get(p) + s.Lock() + defer s.Unlock() + + protomap := make(map[string]struct{}, len(protos)) + for _, proto := range protos { + protomap[proto] = struct{}{} + } + + return pb.meta.Put(p, "protocols", protomap) +} + +func (pb *dsProtoBook) AddProtocols(p peer.ID, protos ...string) error { + if err := p.Validate(); err != nil { + return err + } + + s := pb.segments.get(p) + s.Lock() + defer s.Unlock() + + pmap, err := pb.getProtocolMap(p) + if err != nil { + return err + } + + for _, proto := range protos { + pmap[proto] = struct{}{} + } + + return pb.meta.Put(p, "protocols", pmap) +} + +func (pb *dsProtoBook) GetProtocols(p peer.ID) ([]string, error) { + if err := p.Validate(); err != nil { + return nil, err + } + + s := pb.segments.get(p) + s.RLock() + defer s.RUnlock() + + pmap, err := pb.getProtocolMap(p) + if err != nil { + return nil, err + } + + res := make([]string, 0, len(pmap)) + for proto := range pmap { + res = append(res, proto) + } + + return res, nil +} + +func (pb *dsProtoBook) SupportsProtocols(p peer.ID, protos ...string) ([]string, error) { + if err := p.Validate(); err != nil { + return nil, err + } + + s := pb.segments.get(p) + s.RLock() + defer s.RUnlock() + + pmap, err := pb.getProtocolMap(p) + if err != nil { + return nil, err + } + + res := make([]string, 0, len(protos)) + for _, proto := range protos { + if _, ok := pmap[proto]; ok { + res = append(res, proto) + } + } + + return res, nil +} + +func (pb *dsProtoBook) FirstSupportedProtocol(p peer.ID, protos ...string) (string, error) { + if err := p.Validate(); err != nil { + return "", err + } + + s := pb.segments.get(p) + s.RLock() + defer s.RUnlock() + + pmap, err := pb.getProtocolMap(p) + if err != nil { + return "", err + } + for _, proto := range protos { + if _, ok := pmap[proto]; ok { + return proto, nil + } + } + + return "", nil +} + +func (pb *dsProtoBook) RemoveProtocols(p peer.ID, protos ...string) error { + if err := p.Validate(); err != nil { + return err + } + + s := pb.segments.get(p) + s.Lock() + defer s.Unlock() + + pmap, err := pb.getProtocolMap(p) + if err != nil { + return err + } + + if len(pmap) == 0 { + // nothing to do. 
+ return nil + } + + for _, proto := range protos { + delete(pmap, proto) + } + + return pb.meta.Put(p, "protocols", pmap) +} + +func (pb *dsProtoBook) getProtocolMap(p peer.ID) (map[string]struct{}, error) { + iprotomap, err := pb.meta.Get(p, "protocols") + switch err { + default: + return nil, err + case pstore.ErrNotFound: + return make(map[string]struct{}), nil + case nil: + cast, ok := iprotomap.(map[string]struct{}) + if !ok { + return nil, fmt.Errorf("stored protocol set was not a map") + } + + return cast, nil + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4fa6559dd..f3c6c075c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -132,8 +132,10 @@ github.com/go-playground/universal-translator # github.com/go-stack/stack v1.8.0 github.com/go-stack/stack # github.com/gogo/protobuf v1.3.2 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/io github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo/descriptor # github.com/golang-migrate/migrate/v4 v4.8.0 github.com/golang-migrate/migrate/v4 github.com/golang-migrate/migrate/v4/database @@ -183,6 +185,11 @@ github.com/huin/goupnp/soap github.com/huin/goupnp/ssdp # github.com/ipfs/go-cid v0.0.7 github.com/ipfs/go-cid +# github.com/ipfs/go-datastore v0.4.4 +github.com/ipfs/go-datastore +github.com/ipfs/go-datastore/query +# github.com/ipfs/go-ds-sql v0.2.0 +github.com/ipfs/go-ds-sql # github.com/ipfs/go-ipfs-util v0.0.2 github.com/ipfs/go-ipfs-util # github.com/ipfs/go-log v1.0.4 @@ -271,6 +278,7 @@ github.com/libp2p/go-libp2p-core/routing github.com/libp2p/go-libp2p-core/sec github.com/libp2p/go-libp2p-core/sec/insecure github.com/libp2p/go-libp2p-core/sec/insecure/pb +github.com/libp2p/go-libp2p-core/test github.com/libp2p/go-libp2p-core/transport # github.com/libp2p/go-libp2p-discovery v0.5.0 github.com/libp2p/go-libp2p-discovery @@ -286,6 +294,8 @@ github.com/libp2p/go-libp2p-noise/pb # github.com/libp2p/go-libp2p-peerstore v0.2.6 github.com/libp2p/go-libp2p-peerstore github.com/libp2p/go-libp2p-peerstore/addr +github.com/libp2p/go-libp2p-peerstore/pb +github.com/libp2p/go-libp2p-peerstore/pstoreds github.com/libp2p/go-libp2p-peerstore/pstoremem # github.com/libp2p/go-libp2p-pnet v0.2.0 github.com/libp2p/go-libp2p-pnet diff --git a/wakuv2/api_test.go b/wakuv2/api_test.go index 4f8a5f4f7..6a5e7ec92 100644 --- a/wakuv2/api_test.go +++ b/wakuv2/api_test.go @@ -27,7 +27,7 @@ import ( ) func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) { - w, err := New("", nil, nil) + w, err := New("", nil, nil, nil) if err != nil { t.Fatalf("Error creating WakuV2 client: %v", err) } diff --git a/wakuv2/config.go b/wakuv2/config.go index 48e61921e..2376240ae 100644 --- a/wakuv2/config.go +++ b/wakuv2/config.go @@ -28,6 +28,7 @@ type Config struct { SoftBlacklistedPeerIDs []string `toml:",omitempty"` Host string `toml:",omitempty"` Port int `toml:",omitempty"` + PersistPeers bool `toml:",omitempty"` PeerExchange bool `toml:",omitempty"` KeepAliveInterval int `toml:",omitempty"` LightClient bool `toml:",omitempty"` diff --git a/wakuv2/persistence/queries.go b/wakuv2/persistence/queries.go new file mode 100644 index 000000000..e5eeb13a0 --- /dev/null +++ b/wakuv2/persistence/queries.go @@ -0,0 +1,93 @@ +package persistence + +import ( + "database/sql" + "fmt" +) + +// Queries are the sqlite queries for a given table. 
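For orientation (not part of the patch), a sketch of how the Queries type and NewQueries constructor defined just below combine with go-ds-sql to produce the ds.Batching datastore the persistent peerstore needs; the helper name is hypothetical, while the "peerstore" table name mirrors the waku.go wiring later in this patch:

package example

import (
	"database/sql"

	ds "github.com/ipfs/go-datastore"
	dssql "github.com/ipfs/go-ds-sql"

	"github.com/status-im/status-go/wakuv2/persistence"
)

// openPeerstoreDatastore builds a SQL-backed datastore on top of an existing
// database handle; NewQueries creates the backing table if it does not exist,
// and the resulting *Queries satisfies go-ds-sql's Queries interface.
func openPeerstoreDatastore(db *sql.DB) (ds.Batching, error) {
	queries, err := persistence.NewQueries("peerstore", db)
	if err != nil {
		return nil, err
	}
	return dssql.NewDatastore(db, queries), nil
}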
+type Queries struct { + deleteQuery string + existsQuery string + getQuery string + putQuery string + queryQuery string + prefixQuery string + limitQuery string + offsetQuery string + getSizeQuery string +} + +// NewQueries creates a new set of queries for the passed table +func NewQueries(tbl string, db *sql.DB) (*Queries, error) { + err := CreateTable(db, tbl) + if err != nil { + return nil, err + } + return &Queries{ + deleteQuery: fmt.Sprintf("DELETE FROM %s WHERE key = $1", tbl), + existsQuery: fmt.Sprintf("SELECT exists(SELECT 1 FROM %s WHERE key=$1)", tbl), + getQuery: fmt.Sprintf("SELECT data FROM %s WHERE key = $1", tbl), + putQuery: fmt.Sprintf("INSERT INTO %s (key, data) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET data = $2", tbl), + queryQuery: fmt.Sprintf("SELECT key, data FROM %s", tbl), + prefixQuery: ` WHERE key LIKE '%s%%' ORDER BY key`, + limitQuery: ` LIMIT %d`, + offsetQuery: ` OFFSET %d`, + getSizeQuery: fmt.Sprintf("SELECT length(data) FROM %s WHERE key = $1", tbl), + }, nil +} + +// Delete returns the query for deleting a row. +func (q Queries) Delete() string { + return q.deleteQuery +} + +// Exists returns the query for determining if a row exists. +func (q Queries) Exists() string { + return q.existsQuery +} + +// Get returns the query for getting a row. +func (q Queries) Get() string { + return q.getQuery +} + +// Put returns the query for putting a row. +func (q Queries) Put() string { + return q.putQuery +} + +// Query returns the query for getting multiple rows. +func (q Queries) Query() string { + return q.queryQuery +} + +// Prefix returns the query fragment for getting a rows with a key prefix. +func (q Queries) Prefix() string { + return q.prefixQuery +} + +// Limit returns the query fragment for limiting results. +func (q Queries) Limit() string { + return q.limitQuery +} + +// Offset returns the query fragment for returning rows from a given offset. +func (q Queries) Offset() string { + return q.offsetQuery +} + +// GetSize returns the query for determining the size of a value. +func (q Queries) GetSize() string { + return q.getSizeQuery +} + +// CreateTable creates the table that will persist the peers +func CreateTable(db *sql.DB, tableName string) error { + sqlStmt := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (key TEXT NOT NULL UNIQUE, data BYTEA);", tableName) + _, err := db.Exec(sqlStmt) + if err != nil { + return err + } + return nil +} diff --git a/wakuv2/waku.go b/wakuv2/waku.go index 5c14b2fe8..2a333ac98 100644 --- a/wakuv2/waku.go +++ b/wakuv2/waku.go @@ -22,6 +22,7 @@ import ( "context" "crypto/ecdsa" "crypto/sha256" + "database/sql" "errors" "fmt" "net" @@ -30,6 +31,7 @@ import ( "time" "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-peerstore/pstoreds" "github.com/multiformats/go-multiaddr" "go.uber.org/zap" @@ -37,6 +39,8 @@ import ( mapset "github.com/deckarep/golang-set" "golang.org/x/crypto/pbkdf2" + dssql "github.com/ipfs/go-ds-sql" + gethcommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" @@ -59,6 +63,7 @@ import ( "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/signal" "github.com/status-im/status-go/wakuv2/common" + "github.com/status-im/status-go/wakuv2/persistence" libp2pdisc "github.com/libp2p/go-libp2p-core/discovery" node "github.com/status-im/go-waku/waku/v2/node" @@ -115,7 +120,7 @@ type Waku struct { } // New creates a WakuV2 client ready to communicate through the LibP2P network. 
-func New(nodeKey string, cfg *Config, logger *zap.Logger) (*Waku, error) { +func New(nodeKey string, cfg *Config, logger *zap.Logger, appdb *sql.DB) (*Waku, error) { if logger == nil { logger = zap.NewNop() } @@ -165,15 +170,39 @@ func New(nodeKey string, cfg *Config, logger *zap.Logger) (*Waku, error) { return nil, fmt.Errorf("failed to setup the network interface: %v", err) } + ctx := context.Background() + connStatusChan := make(chan node.ConnStatus, 100) if cfg.KeepAliveInterval == 0 { cfg.KeepAliveInterval = DefaultConfig.KeepAliveInterval } + libp2pOpts := node.DefaultLibP2POptions + libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(waku.bandwidthCounter)) + + if cfg.PersistPeers { + if appdb == nil { + return nil, fmt.Errorf("a db connection must be provided in order to persist the peers") + } + + // Create persistent peerstore + queries, err := persistence.NewQueries("peerstore", appdb) + if err != nil { + return nil, fmt.Errorf("failed to setup peerstore table: %v", err) + } + datastore := dssql.NewDatastore(appdb, queries) + opts := pstoreds.DefaultOpts() + peerStore, err := pstoreds.NewPeerstore(ctx, datastore, opts) + if err != nil { + return nil, fmt.Errorf("failed to setup peerstore: %v", err) + } + libp2pOpts = append(libp2pOpts, libp2p.Peerstore(peerStore)) + } + opts := []node.WakuNodeOption{ node.WithLibP2POptions( - libp2p.BandwidthReporter(waku.bandwidthCounter), + libp2pOpts..., ), node.WithPrivateKey(privateKey), node.WithHostAddress([]net.Addr{hostAddr}), @@ -202,7 +231,7 @@ func New(nodeKey string, cfg *Config, logger *zap.Logger) (*Waku, error) { opts = append(opts, node.WithWakuRelay(relayOpts...)) } - if waku.node, err = node.New(context.Background(), opts...); err != nil { + if waku.node, err = node.New(ctx, opts...); err != nil { return nil, fmt.Errorf("failed to create a go-waku node: %v", err) }
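Finally, a hypothetical caller-side sketch (not part of the patch) of how peer persistence is switched on: with Config.PersistPeers set, New now requires the application database handle so the "peerstore" table can be created and reused across restarts.

package example

import (
	"database/sql"

	"github.com/status-im/status-go/wakuv2"
)

// startWithPersistentPeers enables the datastore-backed libp2p peerstore.
// Other Config fields are left at their zero values for brevity.
func startWithPersistentPeers(nodeKey string, appdb *sql.DB) (*wakuv2.Waku, error) {
	cfg := &wakuv2.Config{
		PersistPeers: true, // requires a non-nil appdb, otherwise New returns an error
	}
	return wakuv2.New(nodeKey, cfg, nil, appdb) // nil logger falls back to zap.NewNop()
}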