feat: wakuv2 peer persistence (#2287)
parent a8d661ee71
commit cf11713d9c
go.mod (2 additions)

@@ -21,6 +21,7 @@ require (
 	github.com/golang/mock v1.4.4
 	github.com/golang/protobuf v1.5.2
 	github.com/google/uuid v1.2.0
+	github.com/ipfs/go-ds-sql v0.2.0
 	github.com/ipfs/go-log v1.0.4
 	github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a
 	github.com/keighl/metabolize v0.0.0-20150915210303-97ab655d4034
@@ -29,6 +30,7 @@ require (
 	github.com/lib/pq v1.9.0
 	github.com/libp2p/go-libp2p v0.13.0
 	github.com/libp2p/go-libp2p-core v0.8.5
+	github.com/libp2p/go-libp2p-peerstore v0.2.6
 	github.com/lucasb-eyer/go-colorful v1.0.3
 	github.com/mat/besticon v0.0.0-20210314201728-1579f269edb7
 	github.com/mattn/go-colorable v0.1.4 // indirect
go.sum (11 additions)
|
@@ -38,6 +38,7 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
|
|||
contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
|
||||
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
|
||||
|
@@ -65,6 +66,7 @@ github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETF
|
|||
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/goquery v1.6.1 h1:FgjbQZKl5HTmcn4sKBgvx8vv63nhyhIpv7lJpFGCWpk=
|
||||
github.com/PuerkitoBio/goquery v1.6.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
|
||||
|
@@ -213,11 +215,14 @@ github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zA
|
|||
github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
|
||||
github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
|
||||
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
|
||||
github.com/dgraph-io/badger v1.6.1 h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg=
|
||||
github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU=
|
||||
github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po=
|
||||
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
|
||||
|
@@ -234,6 +239,7 @@ github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk
|
|||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20191022152526-8cb203812681/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
|
@@ -464,16 +470,20 @@ github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqg
|
|||
github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
|
||||
github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
|
||||
github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
|
||||
github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8=
|
||||
github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
|
||||
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
|
||||
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
|
||||
github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
|
||||
github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s=
|
||||
github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE=
|
||||
github.com/ipfs/go-ds-badger v0.2.3 h1:J27YvAcpuA5IvZUbeBxOcQgqnYHUPxoygc6QxxkodZ4=
|
||||
github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk=
|
||||
github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
|
||||
github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
|
||||
github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw=
|
||||
github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
|
||||
github.com/ipfs/go-ds-sql v0.2.0 h1:ZUHUbU5IydNuBWzcRMOZYkBUwTg+L56o23fEVcbWC7o=
|
||||
github.com/ipfs/go-ds-sql v0.2.0/go.mod h1:/c47NpRiHobwn+8F8EpW0yBy8d3Mx/j/tIlrVN1e1Ec=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
|
||||
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
|
||||
|
@@ -1060,6 +1070,7 @@ github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7A
|
|||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||

@@ -258,6 +258,7 @@ func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku,
 		WakuRendezvousNodes: nodeConfig.ClusterConfig.WakuRendezvousNodes,
 		PeerExchange:        nodeConfig.WakuV2Config.PeerExchange,
 		DiscoveryLimit:      nodeConfig.WakuV2Config.DiscoveryLimit,
+		PersistPeers:        nodeConfig.WakuV2Config.PersistPeers,
 	}
 
 	if cfg.Host == "" {
@@ -278,7 +279,7 @@ func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku,
 	}
 	logging.SetAllLoggers(lvl)
 
-	w, err := wakuv2.New(nodeConfig.NodeKey, cfg, logutils.ZapLogger())
+	w, err := wakuv2.New(nodeConfig.NodeKey, cfg, logutils.ZapLogger(), b.appDB)
 
 	if err != nil {
 		return nil, err
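For context on what the extra b.appDB argument enables: the usual way to persist peers in go-libp2p is to back the host's peerstore with a datastore instead of keeping it only in memory, so that known peer records survive a restart and can be re-dialed. The sketch below shows that general pattern and nothing more; it is not code from this commit, and the go-ds-sql and pstoreds calls (sqlds.NewDatastore, pstoreds.NewPeerstore, pstoreds.DefaultOpts) as well as the Queries value are assumptions about the newly added dependencies, not something shown in the diff.

package main

import (
	"context"
	"database/sql"
	"log"

	sqlds "github.com/ipfs/go-ds-sql"
	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p-peerstore/pstoreds"
)

// newPersistentHost is an illustrative helper, not code from this commit: it backs
// a libp2p host's peerstore with a SQL-backed datastore so that peer records
// outlive the process and can be reconnected to on the next start.
func newPersistentHost(ctx context.Context, appDB *sql.DB, queries sqlds.Queries) error {
	// go-ds-sql: a go-datastore implementation on top of an existing *sql.DB.
	// The Queries value supplies the concrete SQL statements (assumed to be defined elsewhere).
	store := sqlds.NewDatastore(appDB, queries)

	// go-libp2p-peerstore/pstoreds: a peerstore that reads and writes peer records
	// through that datastore instead of holding them only in memory.
	ps, err := pstoreds.NewPeerstore(ctx, store, pstoreds.DefaultOpts())
	if err != nil {
		return err
	}

	// Hand the persistent peerstore to the host; a node built like this can load the
	// stored addresses on the next start and attempt to reconnect to those peers.
	h, err := libp2p.New(ctx, libp2p.Peerstore(ps))
	if err != nil {
		return err
	}
	defer h.Close()
	return nil
}

func main() {
	log.Println("sketch only; see newPersistentHost")
}

Whether wakuv2.New wires things up exactly like this is not visible here; the diff only shows that the service now receives the application DB together with the PersistPeers flag.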
@@ -175,6 +175,10 @@ type WakuV2Config struct {
 	// DiscoveryLimit indicates the maximum number of peers to discover
 	DiscoveryLimit int
 
+	// PersistPeers indicates if peer records are going to be stored in the DB, so the next time
+	// the node starts it attempts to reconnect to these peers
+	PersistPeers bool
+
 	// EnableMailServer is a mode in which the node is capable of delivering expired messages on demand
 	EnableMailServer bool
 
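Read together with the surrounding fields, a caller enabling the new behaviour would set something like the fragment below. It is illustrative only: the field names come from the struct above, while the values and the bool type of PeerExchange are assumptions.

// Illustrative fragment, not from the commit: a WakuV2Config enabling peer persistence.
wakuConf := params.WakuV2Config{
	PeerExchange:   true, // assumed bool; consumed by wakuV2Service above
	DiscoveryLimit: 20,   // maximum number of peers to discover
	PersistPeers:   true, // store peer records in the DB and reconnect to them on restart
}
_ = wakuConf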
@@ -0,0 +1,37 @@
|
|||
# Protocol Buffers for Go with Gadgets
|
||||
#
|
||||
# Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
# http://github.com/gogo/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||

regenerate:
	go install github.com/gogo/protobuf/protoc-gen-gogo
	protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto

restore:
	cp gogo.pb.golden gogo.pb.go

preserve:
	cp gogo.pb.go gogo.pb.golden
@@ -0,0 +1,169 @@
|
|||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package gogoproto provides extensions for protocol buffers to achieve:
|
||||
|
||||
- fast marshalling and unmarshalling.
|
||||
- peace of mind by optionally generating test and benchmark code.
|
||||
- more canonical Go structures.
|
||||
- less typing by optionally generating extra helper code.
|
||||
- goprotobuf compatibility
|
||||
|
||||
More Canonical Go Structures
|
||||
|
||||
A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs.
|
||||
You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
|
||||
Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.
|
||||
|
||||
- nullable, if false, a field is generated without a pointer (see warning below).
|
||||
- embed, if true, the field is generated as an embedded field.
|
||||
- customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128
|
||||
- customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames.
|
||||
- casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums.
|
||||
- castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
|
||||
- castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
|
||||
|
||||
Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
|
||||
|
||||
Let us look at:
|
||||
|
||||
github.com/gogo/protobuf/test/example/example.proto
|
||||
|
||||
for a quicker overview.
|
||||
|
||||
The following message:
|
||||
|
||||
package test;
|
||||
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
|
||||
message A {
|
||||
optional string Description = 1 [(gogoproto.nullable) = false];
|
||||
optional int64 Number = 2 [(gogoproto.nullable) = false];
|
||||
optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
Will generate a go struct which looks a lot like this:
|
||||
|
||||
type A struct {
|
||||
Description string
|
||||
Number int64
|
||||
Id github_com_gogo_protobuf_test_custom.Uuid
|
||||
}
|
||||
|
||||
You will see there are no pointers, since all fields are non-nullable.
|
||||
You will also see a custom type which marshals to a string.
|
||||
Be warned it is your responsibility to test your custom types thoroughly.
|
||||
You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
|
||||
|
||||
Next we will embed the message A in message B.
|
||||
|
||||
message B {
|
||||
optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
|
||||
repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
See below that A is embedded in B.
|
||||
|
||||
type B struct {
|
||||
A
|
||||
G []github_com_gogo_protobuf_test_custom.Uint128
|
||||
}
|
||||
|
||||
Also see the repeated custom type.
|
||||
|
||||
type Uint128 [2]uint64
|
||||
|
||||
Next we will create a custom name for one of our fields.
|
||||
|
||||
message C {
|
||||
optional int64 size = 1 [(gogoproto.customname) = "MySize"];
|
||||
}
|
||||
|
||||
See below that the field's name is MySize and not Size.
|
||||
|
||||
type C struct {
|
||||
MySize *int64
|
||||
}
|
||||
|
||||
This is useful when a protocol buffer message has a field name which conflicts with a generated method.
|
||||
As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
|
||||
Using customname you can fix this error without changing the field name.
|
||||
This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
|
||||
|
||||
Gogoprotobuf also has some more subtle changes, these could be changed back:
|
||||
|
||||
- the generated package names for imports do not have the extra /filename.pb,
|
||||
but are actually the imports specified in the .proto file.
|
||||
|
||||
Gogoprotobuf also has lost some features which should be brought back with time:
|
||||
|
||||
- Marshalling and unmarshalling with reflect and without the unsafe package,
|
||||
this requires work in pointer_reflect.go
|
||||
|
||||
Why does nullable break protocol buffer specifications:
|
||||
|
||||
The protocol buffer specification states, somewhere, that you should be able to tell whether a
|
||||
field is set or unset. With the option nullable=false this feature is lost,
|
||||
since your non-nullable fields will always be set. It can be seen as a layer on top of
|
||||
protocol buffers, where before and after marshalling all non-nullable fields are set
|
||||
and they cannot be unset.
|
||||
|
||||
Goprotobuf Compatibility:
|
||||
|
||||
Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
|
||||
Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
|
||||
The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
|
||||
|
||||
- gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
|
||||
- goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
|
||||
- goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method.
|
||||
- goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face
|
||||
- goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method.
|
||||
- goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
|
||||
- goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
|
||||
- goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
|
||||
|
||||
Less Typing and Peace of Mind is explained in their specific plugin folders godoc:
|
||||
|
||||
- github.com/gogo/protobuf/plugin/<extension_name>
|
||||
|
||||
If you do not use any of these extensions, the code that is generated
|
||||
will be the same as if goprotobuf had generated it.
|
||||
|
||||
The most complete way to see examples is to look at
|
||||
|
||||
github.com/gogo/protobuf/test/thetest.proto
|
||||
|
||||
Gogoprototest is a separate project,
|
||||
because we want to keep gogoprotobuf independent of goprotobuf,
|
||||
but we still want to test it thoroughly.
|
||||
|
||||
*/
|
||||
package gogoproto
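The options documented above are ordinary proto2 extensions on the descriptor options, so generators and other tools can read them back from a descriptor at run time. A minimal sketch of that follows; proto.GetBoolExtension is assumed to be the matching helper in the gogo proto runtime, and the gogoproto package itself ships ready-made wrappers of this kind (IsNullable and friends).

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// isNullable mirrors what the generator does with the gogoproto.nullable option
// described above: a field keeps its pointer unless nullable is explicitly false.
// proto.GetBoolExtension is an assumed helper of the gogo proto package.
func isNullable(field *descriptor.FieldDescriptorProto) bool {
	if field.Options == nil {
		return true // no options set: the default is nullable
	}
	return proto.GetBoolExtension(field.Options, gogoproto.E_Nullable, true)
}

func main() {
	fmt.Println(isNullable(&descriptor.FieldDescriptorProto{}))
}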
@@ -0,0 +1,874 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: gogo.proto
|
||||
|
||||
package gogoproto
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.EnumOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 62001,
|
||||
Name: "gogoproto.goproto_enum_prefix",
|
||||
Tag: "varint,62001,opt,name=goproto_enum_prefix",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoEnumStringer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.EnumOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 62021,
|
||||
Name: "gogoproto.goproto_enum_stringer",
|
||||
Tag: "varint,62021,opt,name=goproto_enum_stringer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_EnumStringer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.EnumOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 62022,
|
||||
Name: "gogoproto.enum_stringer",
|
||||
Tag: "varint,62022,opt,name=enum_stringer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_EnumCustomname = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.EnumOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 62023,
|
||||
Name: "gogoproto.enum_customname",
|
||||
Tag: "bytes,62023,opt,name=enum_customname",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Enumdecl = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.EnumOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 62024,
|
||||
Name: "gogoproto.enumdecl",
|
||||
Tag: "varint,62024,opt,name=enumdecl",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_EnumvalueCustomname = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.EnumValueOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 66001,
|
||||
Name: "gogoproto.enumvalue_customname",
|
||||
Tag: "bytes,66001,opt,name=enumvalue_customname",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoGettersAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63001,
|
||||
Name: "gogoproto.goproto_getters_all",
|
||||
Tag: "varint,63001,opt,name=goproto_getters_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63002,
|
||||
Name: "gogoproto.goproto_enum_prefix_all",
|
||||
Tag: "varint,63002,opt,name=goproto_enum_prefix_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoStringerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63003,
|
||||
Name: "gogoproto.goproto_stringer_all",
|
||||
Tag: "varint,63003,opt,name=goproto_stringer_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_VerboseEqualAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63004,
|
||||
Name: "gogoproto.verbose_equal_all",
|
||||
Tag: "varint,63004,opt,name=verbose_equal_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_FaceAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63005,
|
||||
Name: "gogoproto.face_all",
|
||||
Tag: "varint,63005,opt,name=face_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GostringAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63006,
|
||||
Name: "gogoproto.gostring_all",
|
||||
Tag: "varint,63006,opt,name=gostring_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_PopulateAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63007,
|
||||
Name: "gogoproto.populate_all",
|
||||
Tag: "varint,63007,opt,name=populate_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_StringerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63008,
|
||||
Name: "gogoproto.stringer_all",
|
||||
Tag: "varint,63008,opt,name=stringer_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_OnlyoneAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63009,
|
||||
Name: "gogoproto.onlyone_all",
|
||||
Tag: "varint,63009,opt,name=onlyone_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_EqualAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63013,
|
||||
Name: "gogoproto.equal_all",
|
||||
Tag: "varint,63013,opt,name=equal_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_DescriptionAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63014,
|
||||
Name: "gogoproto.description_all",
|
||||
Tag: "varint,63014,opt,name=description_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_TestgenAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63015,
|
||||
Name: "gogoproto.testgen_all",
|
||||
Tag: "varint,63015,opt,name=testgen_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_BenchgenAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63016,
|
||||
Name: "gogoproto.benchgen_all",
|
||||
Tag: "varint,63016,opt,name=benchgen_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_MarshalerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63017,
|
||||
Name: "gogoproto.marshaler_all",
|
||||
Tag: "varint,63017,opt,name=marshaler_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_UnmarshalerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63018,
|
||||
Name: "gogoproto.unmarshaler_all",
|
||||
Tag: "varint,63018,opt,name=unmarshaler_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_StableMarshalerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63019,
|
||||
Name: "gogoproto.stable_marshaler_all",
|
||||
Tag: "varint,63019,opt,name=stable_marshaler_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_SizerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63020,
|
||||
Name: "gogoproto.sizer_all",
|
||||
Tag: "varint,63020,opt,name=sizer_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63021,
|
||||
Name: "gogoproto.goproto_enum_stringer_all",
|
||||
Tag: "varint,63021,opt,name=goproto_enum_stringer_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_EnumStringerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63022,
|
||||
Name: "gogoproto.enum_stringer_all",
|
||||
Tag: "varint,63022,opt,name=enum_stringer_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63023,
|
||||
Name: "gogoproto.unsafe_marshaler_all",
|
||||
Tag: "varint,63023,opt,name=unsafe_marshaler_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63024,
|
||||
Name: "gogoproto.unsafe_unmarshaler_all",
|
||||
Tag: "varint,63024,opt,name=unsafe_unmarshaler_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63025,
|
||||
Name: "gogoproto.goproto_extensions_map_all",
|
||||
Tag: "varint,63025,opt,name=goproto_extensions_map_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63026,
|
||||
Name: "gogoproto.goproto_unrecognized_all",
|
||||
Tag: "varint,63026,opt,name=goproto_unrecognized_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GogoprotoImport = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63027,
|
||||
Name: "gogoproto.gogoproto_import",
|
||||
Tag: "varint,63027,opt,name=gogoproto_import",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_ProtosizerAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63028,
|
||||
Name: "gogoproto.protosizer_all",
|
||||
Tag: "varint,63028,opt,name=protosizer_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_CompareAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63029,
|
||||
Name: "gogoproto.compare_all",
|
||||
Tag: "varint,63029,opt,name=compare_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_TypedeclAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63030,
|
||||
Name: "gogoproto.typedecl_all",
|
||||
Tag: "varint,63030,opt,name=typedecl_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_EnumdeclAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63031,
|
||||
Name: "gogoproto.enumdecl_all",
|
||||
Tag: "varint,63031,opt,name=enumdecl_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoRegistration = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63032,
|
||||
Name: "gogoproto.goproto_registration",
|
||||
Tag: "varint,63032,opt,name=goproto_registration",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_MessagenameAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63033,
|
||||
Name: "gogoproto.messagename_all",
|
||||
Tag: "varint,63033,opt,name=messagename_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoSizecacheAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63034,
|
||||
Name: "gogoproto.goproto_sizecache_all",
|
||||
Tag: "varint,63034,opt,name=goproto_sizecache_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FileOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 63035,
|
||||
Name: "gogoproto.goproto_unkeyed_all",
|
||||
Tag: "varint,63035,opt,name=goproto_unkeyed_all",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoGetters = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64001,
|
||||
Name: "gogoproto.goproto_getters",
|
||||
Tag: "varint,64001,opt,name=goproto_getters",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoStringer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64003,
|
||||
Name: "gogoproto.goproto_stringer",
|
||||
Tag: "varint,64003,opt,name=goproto_stringer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_VerboseEqual = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64004,
|
||||
Name: "gogoproto.verbose_equal",
|
||||
Tag: "varint,64004,opt,name=verbose_equal",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Face = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64005,
|
||||
Name: "gogoproto.face",
|
||||
Tag: "varint,64005,opt,name=face",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Gostring = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64006,
|
||||
Name: "gogoproto.gostring",
|
||||
Tag: "varint,64006,opt,name=gostring",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Populate = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64007,
|
||||
Name: "gogoproto.populate",
|
||||
Tag: "varint,64007,opt,name=populate",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Stringer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 67008,
|
||||
Name: "gogoproto.stringer",
|
||||
Tag: "varint,67008,opt,name=stringer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Onlyone = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64009,
|
||||
Name: "gogoproto.onlyone",
|
||||
Tag: "varint,64009,opt,name=onlyone",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Equal = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64013,
|
||||
Name: "gogoproto.equal",
|
||||
Tag: "varint,64013,opt,name=equal",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Description = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64014,
|
||||
Name: "gogoproto.description",
|
||||
Tag: "varint,64014,opt,name=description",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Testgen = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64015,
|
||||
Name: "gogoproto.testgen",
|
||||
Tag: "varint,64015,opt,name=testgen",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Benchgen = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64016,
|
||||
Name: "gogoproto.benchgen",
|
||||
Tag: "varint,64016,opt,name=benchgen",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Marshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64017,
|
||||
Name: "gogoproto.marshaler",
|
||||
Tag: "varint,64017,opt,name=marshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Unmarshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64018,
|
||||
Name: "gogoproto.unmarshaler",
|
||||
Tag: "varint,64018,opt,name=unmarshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_StableMarshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64019,
|
||||
Name: "gogoproto.stable_marshaler",
|
||||
Tag: "varint,64019,opt,name=stable_marshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Sizer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64020,
|
||||
Name: "gogoproto.sizer",
|
||||
Tag: "varint,64020,opt,name=sizer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_UnsafeMarshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64023,
|
||||
Name: "gogoproto.unsafe_marshaler",
|
||||
Tag: "varint,64023,opt,name=unsafe_marshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64024,
|
||||
Name: "gogoproto.unsafe_unmarshaler",
|
||||
Tag: "varint,64024,opt,name=unsafe_unmarshaler",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64025,
|
||||
Name: "gogoproto.goproto_extensions_map",
|
||||
Tag: "varint,64025,opt,name=goproto_extensions_map",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoUnrecognized = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64026,
|
||||
Name: "gogoproto.goproto_unrecognized",
|
||||
Tag: "varint,64026,opt,name=goproto_unrecognized",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Protosizer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64028,
|
||||
Name: "gogoproto.protosizer",
|
||||
Tag: "varint,64028,opt,name=protosizer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Compare = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64029,
|
||||
Name: "gogoproto.compare",
|
||||
Tag: "varint,64029,opt,name=compare",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Typedecl = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64030,
|
||||
Name: "gogoproto.typedecl",
|
||||
Tag: "varint,64030,opt,name=typedecl",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Messagename = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64033,
|
||||
Name: "gogoproto.messagename",
|
||||
Tag: "varint,64033,opt,name=messagename",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoSizecache = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64034,
|
||||
Name: "gogoproto.goproto_sizecache",
|
||||
Tag: "varint,64034,opt,name=goproto_sizecache",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_GoprotoUnkeyed = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.MessageOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 64035,
|
||||
Name: "gogoproto.goproto_unkeyed",
|
||||
Tag: "varint,64035,opt,name=goproto_unkeyed",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Nullable = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65001,
|
||||
Name: "gogoproto.nullable",
|
||||
Tag: "varint,65001,opt,name=nullable",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Embed = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65002,
|
||||
Name: "gogoproto.embed",
|
||||
Tag: "varint,65002,opt,name=embed",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Customtype = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65003,
|
||||
Name: "gogoproto.customtype",
|
||||
Tag: "bytes,65003,opt,name=customtype",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Customname = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65004,
|
||||
Name: "gogoproto.customname",
|
||||
Tag: "bytes,65004,opt,name=customname",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Jsontag = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65005,
|
||||
Name: "gogoproto.jsontag",
|
||||
Tag: "bytes,65005,opt,name=jsontag",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Moretags = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65006,
|
||||
Name: "gogoproto.moretags",
|
||||
Tag: "bytes,65006,opt,name=moretags",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Casttype = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65007,
|
||||
Name: "gogoproto.casttype",
|
||||
Tag: "bytes,65007,opt,name=casttype",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Castkey = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65008,
|
||||
Name: "gogoproto.castkey",
|
||||
Tag: "bytes,65008,opt,name=castkey",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Castvalue = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*string)(nil),
|
||||
Field: 65009,
|
||||
Name: "gogoproto.castvalue",
|
||||
Tag: "bytes,65009,opt,name=castvalue",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Stdtime = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65010,
|
||||
Name: "gogoproto.stdtime",
|
||||
Tag: "varint,65010,opt,name=stdtime",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Stdduration = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65011,
|
||||
Name: "gogoproto.stdduration",
|
||||
Tag: "varint,65011,opt,name=stdduration",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
var E_Wktpointer = &proto.ExtensionDesc{
|
||||
ExtendedType: (*descriptor.FieldOptions)(nil),
|
||||
ExtensionType: (*bool)(nil),
|
||||
Field: 65012,
|
||||
Name: "gogoproto.wktpointer",
|
||||
Tag: "varint,65012,opt,name=wktpointer",
|
||||
Filename: "gogo.proto",
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterExtension(E_GoprotoEnumPrefix)
|
||||
proto.RegisterExtension(E_GoprotoEnumStringer)
|
||||
proto.RegisterExtension(E_EnumStringer)
|
||||
proto.RegisterExtension(E_EnumCustomname)
|
||||
proto.RegisterExtension(E_Enumdecl)
|
||||
proto.RegisterExtension(E_EnumvalueCustomname)
|
||||
proto.RegisterExtension(E_GoprotoGettersAll)
|
||||
proto.RegisterExtension(E_GoprotoEnumPrefixAll)
|
||||
proto.RegisterExtension(E_GoprotoStringerAll)
|
||||
proto.RegisterExtension(E_VerboseEqualAll)
|
||||
proto.RegisterExtension(E_FaceAll)
|
||||
proto.RegisterExtension(E_GostringAll)
|
||||
proto.RegisterExtension(E_PopulateAll)
|
||||
proto.RegisterExtension(E_StringerAll)
|
||||
proto.RegisterExtension(E_OnlyoneAll)
|
||||
proto.RegisterExtension(E_EqualAll)
|
||||
proto.RegisterExtension(E_DescriptionAll)
|
||||
proto.RegisterExtension(E_TestgenAll)
|
||||
proto.RegisterExtension(E_BenchgenAll)
|
||||
proto.RegisterExtension(E_MarshalerAll)
|
||||
proto.RegisterExtension(E_UnmarshalerAll)
|
||||
proto.RegisterExtension(E_StableMarshalerAll)
|
||||
proto.RegisterExtension(E_SizerAll)
|
||||
proto.RegisterExtension(E_GoprotoEnumStringerAll)
|
||||
proto.RegisterExtension(E_EnumStringerAll)
|
||||
proto.RegisterExtension(E_UnsafeMarshalerAll)
|
||||
proto.RegisterExtension(E_UnsafeUnmarshalerAll)
|
||||
proto.RegisterExtension(E_GoprotoExtensionsMapAll)
|
||||
proto.RegisterExtension(E_GoprotoUnrecognizedAll)
|
||||
proto.RegisterExtension(E_GogoprotoImport)
|
||||
proto.RegisterExtension(E_ProtosizerAll)
|
||||
proto.RegisterExtension(E_CompareAll)
|
||||
proto.RegisterExtension(E_TypedeclAll)
|
||||
proto.RegisterExtension(E_EnumdeclAll)
|
||||
proto.RegisterExtension(E_GoprotoRegistration)
|
||||
proto.RegisterExtension(E_MessagenameAll)
|
||||
proto.RegisterExtension(E_GoprotoSizecacheAll)
|
||||
proto.RegisterExtension(E_GoprotoUnkeyedAll)
|
||||
proto.RegisterExtension(E_GoprotoGetters)
|
||||
proto.RegisterExtension(E_GoprotoStringer)
|
||||
proto.RegisterExtension(E_VerboseEqual)
|
||||
proto.RegisterExtension(E_Face)
|
||||
proto.RegisterExtension(E_Gostring)
|
||||
proto.RegisterExtension(E_Populate)
|
||||
proto.RegisterExtension(E_Stringer)
|
||||
proto.RegisterExtension(E_Onlyone)
|
||||
proto.RegisterExtension(E_Equal)
|
||||
proto.RegisterExtension(E_Description)
|
||||
proto.RegisterExtension(E_Testgen)
|
||||
proto.RegisterExtension(E_Benchgen)
|
||||
proto.RegisterExtension(E_Marshaler)
|
||||
proto.RegisterExtension(E_Unmarshaler)
|
||||
proto.RegisterExtension(E_StableMarshaler)
|
||||
proto.RegisterExtension(E_Sizer)
|
||||
proto.RegisterExtension(E_UnsafeMarshaler)
|
||||
proto.RegisterExtension(E_UnsafeUnmarshaler)
|
||||
proto.RegisterExtension(E_GoprotoExtensionsMap)
|
||||
proto.RegisterExtension(E_GoprotoUnrecognized)
|
||||
proto.RegisterExtension(E_Protosizer)
|
||||
proto.RegisterExtension(E_Compare)
|
||||
proto.RegisterExtension(E_Typedecl)
|
||||
proto.RegisterExtension(E_Messagename)
|
||||
proto.RegisterExtension(E_GoprotoSizecache)
|
||||
proto.RegisterExtension(E_GoprotoUnkeyed)
|
||||
proto.RegisterExtension(E_Nullable)
|
||||
proto.RegisterExtension(E_Embed)
|
||||
proto.RegisterExtension(E_Customtype)
|
||||
proto.RegisterExtension(E_Customname)
|
||||
proto.RegisterExtension(E_Jsontag)
|
||||
proto.RegisterExtension(E_Moretags)
|
||||
proto.RegisterExtension(E_Casttype)
|
||||
proto.RegisterExtension(E_Castkey)
|
||||
proto.RegisterExtension(E_Castvalue)
|
||||
proto.RegisterExtension(E_Stdtime)
|
||||
proto.RegisterExtension(E_Stdduration)
|
||||
proto.RegisterExtension(E_Wktpointer)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) }
|
||||
|
||||
var fileDescriptor_592445b5231bc2b9 = []byte{
|
||||
// 1328 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45,
|
||||
0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9,
|
||||
0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18,
|
||||
0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84,
|
||||
0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f,
|
||||
0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7,
|
||||
0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6,
|
||||
0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9,
|
||||
0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6,
|
||||
0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59,
|
||||
0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc,
|
||||
0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99,
|
||||
0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19,
|
||||
0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b,
|
||||
0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79,
|
||||
0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8,
|
||||
0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d,
|
||||
0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4,
|
||||
0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78,
|
||||
0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0,
|
||||
0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1,
|
||||
0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6,
|
||||
0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae,
|
||||
0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c,
|
||||
0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0,
|
||||
0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b,
|
||||
0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04,
|
||||
0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28,
|
||||
0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36,
|
||||
0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50,
|
||||
0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d,
|
||||
0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa,
|
||||
0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5,
|
||||
0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b,
|
||||
0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24,
|
||||
0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05,
|
||||
0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2,
|
||||
0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b,
|
||||
0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92,
|
||||
0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56,
|
||||
0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e,
|
||||
0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19,
|
||||
0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70,
|
||||
0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0,
|
||||
	0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c,
	0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a,
	0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0,
	0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4,
	0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95,
	0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9,
	0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9,
	0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f,
	0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9,
	0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5,
	0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8,
	0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb,
	0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae,
	0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31,
	0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d,
	0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30,
	0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94,
	0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f,
	0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36,
	0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e,
	0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b,
	0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e,
	0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb,
	0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5,
	0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17,
	0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45,
	0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32,
	0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4,
	0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8,
	0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f,
	0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49,
	0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f,
	0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb,
	0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c,
	0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90,
	0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e,
	0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd,
	0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb,
	0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00,
}
@ -0,0 +1,45 @@
// Code generated by protoc-gen-go.
// source: gogo.proto
// DO NOT EDIT!

package gogoproto

import proto "github.com/gogo/protobuf/proto"
import json "encoding/json"
import math "math"
import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"

// Reference proto, json, and math imports to suppress error if they are not otherwise used.
var _ = proto.Marshal
var _ = &json.SyntaxError{}
var _ = math.Inf

var E_Nullable = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         51235,
	Name:          "gogoproto.nullable",
	Tag:           "varint,51235,opt,name=nullable",
}

var E_Embed = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*bool)(nil),
	Field:         51236,
	Name:          "gogoproto.embed",
	Tag:           "varint,51236,opt,name=embed",
}

var E_Customtype = &proto.ExtensionDesc{
	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
	ExtensionType: (*string)(nil),
	Field:         51237,
	Name:          "gogoproto.customtype",
	Tag:           "bytes,51237,opt,name=customtype",
}

func init() {
	proto.RegisterExtension(E_Nullable)
	proto.RegisterExtension(E_Embed)
	proto.RegisterExtension(E_Customtype)
}
@ -0,0 +1,144 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto2";
package gogoproto;

import "google/protobuf/descriptor.proto";

option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
option go_package = "github.com/gogo/protobuf/gogoproto";

extend google.protobuf.EnumOptions {
	optional bool goproto_enum_prefix = 62001;
	optional bool goproto_enum_stringer = 62021;
	optional bool enum_stringer = 62022;
	optional string enum_customname = 62023;
	optional bool enumdecl = 62024;
}

extend google.protobuf.EnumValueOptions {
	optional string enumvalue_customname = 66001;
}

extend google.protobuf.FileOptions {
	optional bool goproto_getters_all = 63001;
	optional bool goproto_enum_prefix_all = 63002;
	optional bool goproto_stringer_all = 63003;
	optional bool verbose_equal_all = 63004;
	optional bool face_all = 63005;
	optional bool gostring_all = 63006;
	optional bool populate_all = 63007;
	optional bool stringer_all = 63008;
	optional bool onlyone_all = 63009;

	optional bool equal_all = 63013;
	optional bool description_all = 63014;
	optional bool testgen_all = 63015;
	optional bool benchgen_all = 63016;
	optional bool marshaler_all = 63017;
	optional bool unmarshaler_all = 63018;
	optional bool stable_marshaler_all = 63019;

	optional bool sizer_all = 63020;

	optional bool goproto_enum_stringer_all = 63021;
	optional bool enum_stringer_all = 63022;

	optional bool unsafe_marshaler_all = 63023;
	optional bool unsafe_unmarshaler_all = 63024;

	optional bool goproto_extensions_map_all = 63025;
	optional bool goproto_unrecognized_all = 63026;
	optional bool gogoproto_import = 63027;
	optional bool protosizer_all = 63028;
	optional bool compare_all = 63029;
	optional bool typedecl_all = 63030;
	optional bool enumdecl_all = 63031;

	optional bool goproto_registration = 63032;
	optional bool messagename_all = 63033;

	optional bool goproto_sizecache_all = 63034;
	optional bool goproto_unkeyed_all = 63035;
}

extend google.protobuf.MessageOptions {
	optional bool goproto_getters = 64001;
	optional bool goproto_stringer = 64003;
	optional bool verbose_equal = 64004;
	optional bool face = 64005;
	optional bool gostring = 64006;
	optional bool populate = 64007;
	optional bool stringer = 67008;
	optional bool onlyone = 64009;

	optional bool equal = 64013;
	optional bool description = 64014;
	optional bool testgen = 64015;
	optional bool benchgen = 64016;
	optional bool marshaler = 64017;
	optional bool unmarshaler = 64018;
	optional bool stable_marshaler = 64019;

	optional bool sizer = 64020;

	optional bool unsafe_marshaler = 64023;
	optional bool unsafe_unmarshaler = 64024;

	optional bool goproto_extensions_map = 64025;
	optional bool goproto_unrecognized = 64026;

	optional bool protosizer = 64028;
	optional bool compare = 64029;

	optional bool typedecl = 64030;

	optional bool messagename = 64033;

	optional bool goproto_sizecache = 64034;
	optional bool goproto_unkeyed = 64035;
}

extend google.protobuf.FieldOptions {
	optional bool nullable = 65001;
	optional bool embed = 65002;
	optional string customtype = 65003;
	optional string customname = 65004;
	optional string jsontag = 65005;
	optional string moretags = 65006;
	optional string casttype = 65007;
	optional string castkey = 65008;
	optional string castvalue = 65009;

	optional bool stdtime = 65010;
	optional bool stdduration = 65011;
	optional bool wktpointer = 65012;

}
@ -0,0 +1,415 @@
|
|||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package gogoproto
|
||||
|
||||
import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
|
||||
func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Embed, false)
|
||||
}
|
||||
|
||||
func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Nullable, true)
|
||||
}
|
||||
|
||||
func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Stdtime, false)
|
||||
}
|
||||
|
||||
func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Stdduration, false)
|
||||
}
|
||||
|
||||
func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue"
|
||||
}
|
||||
|
||||
func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue"
|
||||
}
|
||||
|
||||
func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value"
|
||||
}
|
||||
|
||||
func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value"
|
||||
}
|
||||
|
||||
func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value"
|
||||
}
|
||||
|
||||
func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value"
|
||||
}
|
||||
|
||||
func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue"
|
||||
}
|
||||
|
||||
func IsStdString(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue"
|
||||
}
|
||||
|
||||
func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue"
|
||||
}
|
||||
|
||||
func IsStdType(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return (IsStdTime(field) || IsStdDuration(field) ||
|
||||
IsStdDouble(field) || IsStdFloat(field) ||
|
||||
IsStdInt64(field) || IsStdUInt64(field) ||
|
||||
IsStdInt32(field) || IsStdUInt32(field) ||
|
||||
IsStdBool(field) ||
|
||||
IsStdString(field) || IsStdBytes(field))
|
||||
}
|
||||
|
||||
func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(field.Options, E_Wktpointer, false)
|
||||
}
|
||||
|
||||
func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
|
||||
nullable := IsNullable(field)
|
||||
if field.IsMessage() || IsCustomType(field) {
|
||||
return nullable
|
||||
}
|
||||
if proto3 {
|
||||
return false
|
||||
}
|
||||
return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
|
||||
}
|
||||
|
||||
func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
typ := GetCustomType(field)
|
||||
if len(typ) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func IsCastType(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
typ := GetCastType(field)
|
||||
if len(typ) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
typ := GetCastKey(field)
|
||||
if len(typ) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
typ := GetCastValue(field)
|
||||
if len(typ) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true))
|
||||
}
|
||||
|
||||
func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true))
|
||||
}
|
||||
|
||||
func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
|
||||
if field == nil {
|
||||
return ""
|
||||
}
|
||||
if field.Options != nil {
|
||||
v, err := proto.GetExtension(field.Options, E_Customtype)
|
||||
if err == nil && v.(*string) != nil {
|
||||
return *(v.(*string))
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
|
||||
if field == nil {
|
||||
return ""
|
||||
}
|
||||
if field.Options != nil {
|
||||
v, err := proto.GetExtension(field.Options, E_Casttype)
|
||||
if err == nil && v.(*string) != nil {
|
||||
return *(v.(*string))
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
|
||||
if field == nil {
|
||||
return ""
|
||||
}
|
||||
if field.Options != nil {
|
||||
v, err := proto.GetExtension(field.Options, E_Castkey)
|
||||
if err == nil && v.(*string) != nil {
|
||||
return *(v.(*string))
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
|
||||
if field == nil {
|
||||
return ""
|
||||
}
|
||||
if field.Options != nil {
|
||||
v, err := proto.GetExtension(field.Options, E_Castvalue)
|
||||
if err == nil && v.(*string) != nil {
|
||||
return *(v.(*string))
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
|
||||
name := GetCustomName(field)
|
||||
if len(name) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
|
||||
name := GetEnumCustomName(field)
|
||||
if len(name) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
|
||||
name := GetEnumValueCustomName(field)
|
||||
if len(name) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
|
||||
if field == nil {
|
||||
return ""
|
||||
}
|
||||
if field.Options != nil {
|
||||
v, err := proto.GetExtension(field.Options, E_Customname)
|
||||
if err == nil && v.(*string) != nil {
|
||||
return *(v.(*string))
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
|
||||
if field == nil {
|
||||
return ""
|
||||
}
|
||||
if field.Options != nil {
|
||||
v, err := proto.GetExtension(field.Options, E_EnumCustomname)
|
||||
if err == nil && v.(*string) != nil {
|
||||
return *(v.(*string))
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
|
||||
if field == nil {
|
||||
return ""
|
||||
}
|
||||
if field.Options != nil {
|
||||
v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
|
||||
if err == nil && v.(*string) != nil {
|
||||
return *(v.(*string))
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
|
||||
if field == nil {
|
||||
return nil
|
||||
}
|
||||
if field.Options != nil {
|
||||
v, err := proto.GetExtension(field.Options, E_Jsontag)
|
||||
if err == nil && v.(*string) != nil {
|
||||
return (v.(*string))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
|
||||
if field == nil {
|
||||
return nil
|
||||
}
|
||||
if field.Options != nil {
|
||||
v, err := proto.GetExtension(field.Options, E_Moretags)
|
||||
if err == nil && v.(*string) != nil {
|
||||
return (v.(*string))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool
|
||||
|
||||
func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true))
|
||||
}
|
||||
|
||||
func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true))
|
||||
}
|
||||
|
||||
func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true))
|
||||
}
|
||||
|
||||
func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false))
|
||||
}
|
||||
|
||||
func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false))
|
||||
}
|
||||
|
||||
func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false))
|
||||
}
|
||||
|
||||
func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false))
|
||||
}
|
||||
|
||||
func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false))
|
||||
}
|
||||
|
||||
func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false))
|
||||
}
|
||||
|
||||
func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false))
|
||||
}
|
||||
|
||||
func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false))
|
||||
}
|
||||
|
||||
func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false))
|
||||
}
|
||||
|
||||
func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false))
|
||||
}
|
||||
|
||||
func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
|
||||
}
|
||||
|
||||
func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
|
||||
}
|
||||
|
||||
func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
|
||||
}
|
||||
|
||||
func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
|
||||
}
|
||||
|
||||
func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
|
||||
}
|
||||
|
||||
func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
|
||||
}
|
||||
|
||||
func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false))
|
||||
}
|
||||
|
||||
func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false))
|
||||
}
|
||||
|
||||
func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false))
|
||||
}
|
||||
|
||||
func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true))
|
||||
}
|
||||
|
||||
func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
|
||||
}
|
||||
|
||||
func IsProto3(file *google_protobuf.FileDescriptorProto) bool {
|
||||
return file.GetSyntax() == "proto3"
|
||||
}
|
||||
|
||||
func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true)
|
||||
}
|
||||
|
||||
func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
|
||||
}
|
||||
|
||||
func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool {
|
||||
return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false)
|
||||
}
|
||||
|
||||
func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false))
|
||||
}
|
||||
|
||||
func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true))
|
||||
}
|
||||
|
||||
func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
|
||||
return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true))
|
||||
}
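The helper functions above are the query surface that gogo-aware code generators use when deciding how to render a field or message. As a quick orientation, here is a minimal, self-contained sketch (not part of the vendored file; the descriptor values are made up for illustration, and the standard gogo/protobuf import paths are assumed) of a generator-side consumer calling them:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// describeField reports how the gogoproto options attached to a field and its
// enclosing message/file would be interpreted by a gogo-aware generator.
func describeField(file *descriptor.FileDescriptorProto, msg *descriptor.DescriptorProto, field *descriptor.FieldDescriptorProto) {
	fmt.Println("nullable:   ", gogoproto.IsNullable(field))
	fmt.Println("custom type:", gogoproto.GetCustomType(field))
	fmt.Println("marshaler:  ", gogoproto.IsMarshaler(file, msg))
	fmt.Println("proto3 file:", gogoproto.IsProto3(file))
}

func main() {
	// Hand-built descriptors with empty option sets, so every helper falls
	// back to its documented default (nullable=true, marshaler=false, ...).
	file := &descriptor.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Syntax:  proto.String("proto3"),
		Options: &descriptor.FileOptions{},
	}
	msg := &descriptor.DescriptorProto{
		Name:    proto.String("Example"),
		Options: &descriptor.MessageOptions{},
	}
	field := &descriptor.FieldDescriptorProto{
		Name:    proto.String("id"),
		Options: &descriptor.FieldOptions{},
	}
	describeField(file, msg, field)
}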
36 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile (generated, vendored, new file)
@ -0,0 +1,36 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

regenerate:
	go install github.com/gogo/protobuf/protoc-gen-gogo
	go install github.com/gogo/protobuf/protoc-gen-gostring
	protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
	protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
118 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go (generated, vendored, new file)
@ -0,0 +1,118 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package descriptor provides functions for obtaining protocol buffer
// descriptors for generated Go types.
//
// These functions cannot go in package proto because they depend on the
// generated protobuf descriptor messages, which themselves depend on proto.
package descriptor

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/gogo/protobuf/proto"
)

// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
func extractFile(gz []byte) (*FileDescriptorProto, error) {
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
	}
	defer r.Close()

	b, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
	}

	fd := new(FileDescriptorProto)
	if err := proto.Unmarshal(b, fd); err != nil {
		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
	}

	return fd, nil
}

// Message is a proto.Message with a method to return its descriptor.
//
// Message types generated by the protocol compiler always satisfy
// the Message interface.
type Message interface {
	proto.Message
	Descriptor() ([]byte, []int)
}

// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
// describing the given message.
func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
	gz, path := msg.Descriptor()
	fd, err := extractFile(gz)
	if err != nil {
		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
	}

	md = fd.MessageType[path[0]]
	for _, i := range path[1:] {
		md = md.NestedType[i]
	}
	return fd, md
}

// Is this field a scalar numeric type?
func (field *FieldDescriptorProto) IsScalar() bool {
	if field.Type == nil {
		return false
	}
	switch *field.Type {
	case FieldDescriptorProto_TYPE_DOUBLE,
		FieldDescriptorProto_TYPE_FLOAT,
		FieldDescriptorProto_TYPE_INT64,
		FieldDescriptorProto_TYPE_UINT64,
		FieldDescriptorProto_TYPE_INT32,
		FieldDescriptorProto_TYPE_FIXED64,
		FieldDescriptorProto_TYPE_FIXED32,
		FieldDescriptorProto_TYPE_BOOL,
		FieldDescriptorProto_TYPE_UINT32,
		FieldDescriptorProto_TYPE_ENUM,
		FieldDescriptorProto_TYPE_SFIXED32,
		FieldDescriptorProto_TYPE_SFIXED64,
		FieldDescriptorProto_TYPE_SINT32,
		FieldDescriptorProto_TYPE_SINT64:
		return true
	default:
		return false
	}
}
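The ForMessage helper above is the usual entry point for recovering a message's descriptor at runtime. A minimal usage sketch (not part of the vendored file) that passes one of the package's own generated types:

package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Any gogo-generated message satisfies descriptor.Message; here the
	// package's own DescriptorProto type is used as the example input.
	fd, md := descriptor.ForMessage(&descriptor.DescriptorProto{})
	fmt.Printf("defined in %s as message %s\n", fd.GetName(), md.GetName())
}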
2865 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go (generated, vendored, new file; diff suppressed because it is too large)
752 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go (generated, vendored, new file)
@ -0,0 +1,752 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: descriptor.proto
|
||||
|
||||
package descriptor
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
sort "sort"
|
||||
strconv "strconv"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
func (this *FileDescriptorSet) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.FileDescriptorSet{")
|
||||
if this.File != nil {
|
||||
s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FileDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 16)
|
||||
s = append(s, "&descriptor.FileDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Package != nil {
|
||||
s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
|
||||
}
|
||||
if this.Dependency != nil {
|
||||
s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
|
||||
}
|
||||
if this.PublicDependency != nil {
|
||||
s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
|
||||
}
|
||||
if this.WeakDependency != nil {
|
||||
s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
|
||||
}
|
||||
if this.MessageType != nil {
|
||||
s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
|
||||
}
|
||||
if this.EnumType != nil {
|
||||
s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
|
||||
}
|
||||
if this.Service != nil {
|
||||
s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
|
||||
}
|
||||
if this.Extension != nil {
|
||||
s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.SourceCodeInfo != nil {
|
||||
s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
|
||||
}
|
||||
if this.Syntax != nil {
|
||||
s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *DescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 14)
|
||||
s = append(s, "&descriptor.DescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Field != nil {
|
||||
s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
|
||||
}
|
||||
if this.Extension != nil {
|
||||
s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
|
||||
}
|
||||
if this.NestedType != nil {
|
||||
s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
|
||||
}
|
||||
if this.EnumType != nil {
|
||||
s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
|
||||
}
|
||||
if this.ExtensionRange != nil {
|
||||
s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
|
||||
}
|
||||
if this.OneofDecl != nil {
|
||||
s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.ReservedRange != nil {
|
||||
s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
|
||||
}
|
||||
if this.ReservedName != nil {
|
||||
s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *DescriptorProto_ExtensionRange) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
|
||||
if this.Start != nil {
|
||||
s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *DescriptorProto_ReservedRange) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
|
||||
if this.Start != nil {
|
||||
s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *ExtensionRangeOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.ExtensionRangeOptions{")
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FieldDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 14)
|
||||
s = append(s, "&descriptor.FieldDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Number != nil {
|
||||
s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
|
||||
}
|
||||
if this.Label != nil {
|
||||
s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n")
|
||||
}
|
||||
if this.Type != nil {
|
||||
s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n")
|
||||
}
|
||||
if this.TypeName != nil {
|
||||
s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
|
||||
}
|
||||
if this.Extendee != nil {
|
||||
s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
|
||||
}
|
||||
if this.DefaultValue != nil {
|
||||
s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
|
||||
}
|
||||
if this.OneofIndex != nil {
|
||||
s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
|
||||
}
|
||||
if this.JsonName != nil {
|
||||
s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *OneofDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.OneofDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 9)
|
||||
s = append(s, "&descriptor.EnumDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Value != nil {
|
||||
s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.ReservedRange != nil {
|
||||
s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
|
||||
}
|
||||
if this.ReservedName != nil {
|
||||
s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumDescriptorProto_EnumReservedRange) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{")
|
||||
if this.Start != nil {
|
||||
s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumValueDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.EnumValueDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Number != nil {
|
||||
s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *ServiceDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.ServiceDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.Method != nil {
|
||||
s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *MethodDescriptorProto) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 10)
|
||||
s = append(s, "&descriptor.MethodDescriptorProto{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
|
||||
}
|
||||
if this.InputType != nil {
|
||||
s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
|
||||
}
|
||||
if this.OutputType != nil {
|
||||
s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
|
||||
}
|
||||
if this.Options != nil {
|
||||
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
||||
}
|
||||
if this.ClientStreaming != nil {
|
||||
s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
|
||||
}
|
||||
if this.ServerStreaming != nil {
|
||||
s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FileOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 25)
|
||||
s = append(s, "&descriptor.FileOptions{")
|
||||
if this.JavaPackage != nil {
|
||||
s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
|
||||
}
|
||||
if this.JavaOuterClassname != nil {
|
||||
s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
|
||||
}
|
||||
if this.JavaMultipleFiles != nil {
|
||||
s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
|
||||
}
|
||||
if this.JavaGenerateEqualsAndHash != nil {
|
||||
s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
|
||||
}
|
||||
if this.JavaStringCheckUtf8 != nil {
|
||||
s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
|
||||
}
|
||||
if this.OptimizeFor != nil {
|
||||
s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n")
|
||||
}
|
||||
if this.GoPackage != nil {
|
||||
s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
|
||||
}
|
||||
if this.CcGenericServices != nil {
|
||||
s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.JavaGenericServices != nil {
|
||||
s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.PyGenericServices != nil {
|
||||
s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.PhpGenericServices != nil {
|
||||
s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.CcEnableArenas != nil {
|
||||
s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
|
||||
}
|
||||
if this.ObjcClassPrefix != nil {
|
||||
s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
|
||||
}
|
||||
if this.CsharpNamespace != nil {
|
||||
s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
|
||||
}
|
||||
if this.SwiftPrefix != nil {
|
||||
s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n")
|
||||
}
|
||||
if this.PhpClassPrefix != nil {
|
||||
s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n")
|
||||
}
|
||||
if this.PhpNamespace != nil {
|
||||
s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n")
|
||||
}
|
||||
if this.PhpMetadataNamespace != nil {
|
||||
s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n")
|
||||
}
|
||||
if this.RubyPackage != nil {
|
||||
s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *MessageOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 9)
|
||||
s = append(s, "&descriptor.MessageOptions{")
|
||||
if this.MessageSetWireFormat != nil {
|
||||
s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
|
||||
}
|
||||
if this.NoStandardDescriptorAccessor != nil {
|
||||
s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.MapEntry != nil {
|
||||
s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *FieldOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 11)
|
||||
s = append(s, "&descriptor.FieldOptions{")
|
||||
if this.Ctype != nil {
|
||||
s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n")
|
||||
}
|
||||
if this.Packed != nil {
|
||||
s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
|
||||
}
|
||||
if this.Jstype != nil {
|
||||
s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n")
|
||||
}
|
||||
if this.Lazy != nil {
|
||||
s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.Weak != nil {
|
||||
s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *OneofOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.OneofOptions{")
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.EnumOptions{")
|
||||
if this.AllowAlias != nil {
|
||||
s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
|
||||
}
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *EnumValueOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.EnumValueOptions{")
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *ServiceOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.ServiceOptions{")
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *MethodOptions) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&descriptor.MethodOptions{")
|
||||
if this.Deprecated != nil {
|
||||
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
|
||||
}
|
||||
if this.IdempotencyLevel != nil {
|
||||
s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n")
|
||||
}
|
||||
if this.UninterpretedOption != nil {
|
||||
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
|
||||
}
|
||||
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *UninterpretedOption) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 11)
|
||||
s = append(s, "&descriptor.UninterpretedOption{")
|
||||
if this.Name != nil {
|
||||
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
|
||||
}
|
||||
if this.IdentifierValue != nil {
|
||||
s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
|
||||
}
|
||||
if this.PositiveIntValue != nil {
|
||||
s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
|
||||
}
|
||||
if this.NegativeIntValue != nil {
|
||||
s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
|
||||
}
|
||||
if this.DoubleValue != nil {
|
||||
s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
|
||||
}
|
||||
if this.StringValue != nil {
|
||||
s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
|
||||
}
|
||||
if this.AggregateValue != nil {
|
||||
s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *UninterpretedOption_NamePart) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&descriptor.UninterpretedOption_NamePart{")
|
||||
if this.NamePart != nil {
|
||||
s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
|
||||
}
|
||||
if this.IsExtension != nil {
|
||||
s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *SourceCodeInfo) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.SourceCodeInfo{")
|
||||
if this.Location != nil {
|
||||
s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *SourceCodeInfo_Location) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 9)
|
||||
s = append(s, "&descriptor.SourceCodeInfo_Location{")
|
||||
if this.Path != nil {
|
||||
s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
|
||||
}
|
||||
if this.Span != nil {
|
||||
s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
|
||||
}
|
||||
if this.LeadingComments != nil {
|
||||
s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
|
||||
}
|
||||
if this.TrailingComments != nil {
|
||||
s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
|
||||
}
|
||||
if this.LeadingDetachedComments != nil {
|
||||
s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *GeneratedCodeInfo) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&descriptor.GeneratedCodeInfo{")
|
||||
if this.Annotation != nil {
|
||||
s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func (this *GeneratedCodeInfo_Annotation) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 8)
|
||||
s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{")
|
||||
if this.Path != nil {
|
||||
s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
|
||||
}
|
||||
if this.SourceFile != nil {
|
||||
s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n")
|
||||
}
|
||||
if this.Begin != nil {
|
||||
s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n")
|
||||
}
|
||||
if this.End != nil {
|
||||
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
|
||||
}
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringDescriptor(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
|
||||
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "})"
|
||||
return s
|
||||
}
|
390
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
generated
vendored
Normal file
|
@ -0,0 +1,390 @@
|
|||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package descriptor
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
|
||||
if !msg.GetOptions().GetMapEntry() {
|
||||
return nil, nil
|
||||
}
|
||||
return msg.GetField()[0], msg.GetField()[1]
|
||||
}
|
||||
|
||||
func dotToUnderscore(r rune) rune {
|
||||
if r == '.' {
|
||||
return '_'
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) WireType() (wire int) {
|
||||
switch *field.Type {
|
||||
case FieldDescriptorProto_TYPE_DOUBLE:
|
||||
return 1
|
||||
case FieldDescriptorProto_TYPE_FLOAT:
|
||||
return 5
|
||||
case FieldDescriptorProto_TYPE_INT64:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_UINT64:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_INT32:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_UINT32:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_FIXED64:
|
||||
return 1
|
||||
case FieldDescriptorProto_TYPE_FIXED32:
|
||||
return 5
|
||||
case FieldDescriptorProto_TYPE_BOOL:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_STRING:
|
||||
return 2
|
||||
case FieldDescriptorProto_TYPE_GROUP:
|
||||
return 2
|
||||
case FieldDescriptorProto_TYPE_MESSAGE:
|
||||
return 2
|
||||
case FieldDescriptorProto_TYPE_BYTES:
|
||||
return 2
|
||||
case FieldDescriptorProto_TYPE_ENUM:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_SFIXED32:
|
||||
return 5
|
||||
case FieldDescriptorProto_TYPE_SFIXED64:
|
||||
return 1
|
||||
case FieldDescriptorProto_TYPE_SINT32:
|
||||
return 0
|
||||
case FieldDescriptorProto_TYPE_SINT64:
|
||||
return 0
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
|
||||
packed := field.IsPacked()
|
||||
wireType := field.WireType()
|
||||
fieldNumber := field.GetNumber()
|
||||
if packed {
|
||||
wireType = 2
|
||||
}
|
||||
x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
|
||||
return x
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
|
||||
packed := field.IsPacked3()
|
||||
wireType := field.WireType()
|
||||
fieldNumber := field.GetNumber()
|
||||
if packed {
|
||||
wireType = 2
|
||||
}
|
||||
x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
|
||||
return x
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) GetKey() []byte {
|
||||
x := field.GetKeyUint64()
|
||||
i := 0
|
||||
keybuf := make([]byte, 0)
|
||||
for i = 0; x > 127; i++ {
|
||||
keybuf = append(keybuf, 0x80|uint8(x&0x7F))
|
||||
x >>= 7
|
||||
}
|
||||
keybuf = append(keybuf, uint8(x))
|
||||
return keybuf
|
||||
}
|
||||
|
||||
func (field *FieldDescriptorProto) GetKey3() []byte {
|
||||
x := field.GetKey3Uint64()
|
||||
i := 0
|
||||
keybuf := make([]byte, 0)
|
||||
for i = 0; x > 127; i++ {
|
||||
keybuf = append(keybuf, 0x80|uint8(x&0x7F))
|
||||
x >>= 7
|
||||
}
|
||||
keybuf = append(keybuf, uint8(x))
|
||||
return keybuf
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
|
||||
msg := desc.GetMessage(packageName, messageName)
|
||||
if msg == nil {
|
||||
return nil
|
||||
}
|
||||
for _, field := range msg.GetField() {
|
||||
if field.GetName() == fieldName {
|
||||
return field
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
|
||||
for _, msg := range file.GetMessageType() {
|
||||
if msg.GetName() == typeName {
|
||||
return msg
|
||||
}
|
||||
nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
|
||||
if nes != nil {
|
||||
return nes
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
|
||||
for _, nes := range msg.GetNestedType() {
|
||||
if nes.GetName() == typeName {
|
||||
return nes
|
||||
}
|
||||
res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
|
||||
if res != nil {
|
||||
return res
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
|
||||
for _, file := range desc.GetFile() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
|
||||
continue
|
||||
}
|
||||
for _, msg := range file.GetMessageType() {
|
||||
if msg.GetName() == typeName {
|
||||
return msg
|
||||
}
|
||||
}
|
||||
for _, msg := range file.GetMessageType() {
|
||||
for _, nes := range msg.GetNestedType() {
|
||||
if nes.GetName() == typeName {
|
||||
return nes
|
||||
}
|
||||
if msg.GetName()+"."+nes.GetName() == typeName {
|
||||
return nes
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
|
||||
for _, file := range desc.GetFile() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
|
||||
continue
|
||||
}
|
||||
for _, msg := range file.GetMessageType() {
|
||||
if msg.GetName() == typeName {
|
||||
return file.GetSyntax() == "proto3"
|
||||
}
|
||||
}
|
||||
for _, msg := range file.GetMessageType() {
|
||||
for _, nes := range msg.GetNestedType() {
|
||||
if nes.GetName() == typeName {
|
||||
return file.GetSyntax() == "proto3"
|
||||
}
|
||||
if msg.GetName()+"."+nes.GetName() == typeName {
|
||||
return file.GetSyntax() == "proto3"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (msg *DescriptorProto) IsExtendable() bool {
|
||||
return len(msg.GetExtensionRange()) > 0
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
|
||||
parent := desc.GetMessage(packageName, typeName)
|
||||
if parent == nil {
|
||||
return "", nil
|
||||
}
|
||||
if !parent.IsExtendable() {
|
||||
return "", nil
|
||||
}
|
||||
extendee := "." + packageName + "." + typeName
|
||||
for _, file := range desc.GetFile() {
|
||||
for _, ext := range file.GetExtension() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
|
||||
if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if ext.GetExtendee() != extendee {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if ext.GetName() == fieldName {
|
||||
return file.GetPackage(), ext
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
|
||||
parent := desc.GetMessage(packageName, typeName)
|
||||
if parent == nil {
|
||||
return "", nil
|
||||
}
|
||||
if !parent.IsExtendable() {
|
||||
return "", nil
|
||||
}
|
||||
extendee := "." + packageName + "." + typeName
|
||||
for _, file := range desc.GetFile() {
|
||||
for _, ext := range file.GetExtension() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
|
||||
if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if ext.GetExtendee() != extendee {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if ext.GetNumber() == fieldNum {
|
||||
return file.GetPackage(), ext
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
|
||||
parent := desc.GetMessage(packageName, typeName)
|
||||
if parent == nil {
|
||||
return "", ""
|
||||
}
|
||||
field := parent.GetFieldDescriptor(fieldName)
|
||||
if field == nil {
|
||||
var extPackageName string
|
||||
extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
|
||||
if field == nil {
|
||||
return "", ""
|
||||
}
|
||||
packageName = extPackageName
|
||||
}
|
||||
typeNames := strings.Split(field.GetTypeName(), ".")
|
||||
if len(typeNames) == 1 {
|
||||
msg := desc.GetMessage(packageName, typeName)
|
||||
if msg == nil {
|
||||
return "", ""
|
||||
}
|
||||
return packageName, msg.GetName()
|
||||
}
|
||||
if len(typeNames) > 2 {
|
||||
for i := 1; i < len(typeNames)-1; i++ {
|
||||
packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
|
||||
typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
|
||||
msg := desc.GetMessage(packageName, typeName)
|
||||
if msg != nil {
|
||||
typeNames := strings.Split(msg.GetName(), ".")
|
||||
if len(typeNames) == 1 {
|
||||
return packageName, msg.GetName()
|
||||
}
|
||||
return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
|
||||
for _, field := range msg.GetField() {
|
||||
if field.GetName() == fieldName {
|
||||
return field
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
|
||||
for _, file := range desc.GetFile() {
|
||||
if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
|
||||
continue
|
||||
}
|
||||
for _, enum := range file.GetEnumType() {
|
||||
if enum.GetName() == typeName {
|
||||
return enum
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsEnum() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_ENUM
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsMessage() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsBytes() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_BYTES
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsRepeated() bool {
|
||||
return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsString() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_STRING
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsBool() bool {
|
||||
return *f.Type == FieldDescriptorProto_TYPE_BOOL
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsRequired() bool {
|
||||
return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsPacked() bool {
|
||||
return f.Options != nil && f.GetOptions().GetPacked()
|
||||
}
|
||||
|
||||
func (f *FieldDescriptorProto) IsPacked3() bool {
|
||||
if f.IsRepeated() && f.IsScalar() {
|
||||
if f.Options == nil || f.GetOptions().Packed == nil {
|
||||
return true
|
||||
}
|
||||
return f.Options != nil && f.GetOptions().GetPacked()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *DescriptorProto) HasExtension() bool {
|
||||
return len(m.ExtensionRange) > 0
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
*.swp
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License
|
||||
|
||||
Copyright (c) 2016 Juan Batiz-Benet
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,9 @@
|
|||
export IPFS_API ?= v04x.ipfs.io
|
||||
|
||||
gx:
|
||||
go get -u github.com/whyrusleeping/gx
|
||||
go get -u github.com/whyrusleeping/gx-go
|
||||
|
||||
deps: gx
|
||||
gx --verbose install --global
|
||||
gx-go rewrite
|
|
@ -0,0 +1,47 @@
|
|||
# go-datastore
|
||||
|
||||
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
|
||||
[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/)
|
||||
[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs)
|
||||
[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
|
||||
[![GoDoc](https://godoc.org/github.com/ipfs/go-datastore?status.svg)](https://godoc.org/github.com/ipfs/go-datastore)
|
||||
|
||||
> key-value datastore interfaces
|
||||
|
||||
## Lead Maintainer
|
||||
|
||||
[Steven Allen](https://github.com/Stebalien)
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Background](#background)
|
||||
- [Documentation](#documentation)
|
||||
- [Contribute](#contribute)
|
||||
- [License](#license)
|
||||
|
||||
## Background
|
||||
|
||||
Datastore is a generic layer of abstraction for data store and database access. It is a simple API with the aim to enable application development in a datastore-agnostic way, allowing datastores to be swapped seamlessly without changing application code. Thus, one can leverage different datastores with different strengths without committing the application to one datastore throughout its lifetime.
|
||||
|
||||
In addition, grouped datastores significantly simplify interesting data access patterns (such as caching and sharding).
|
||||
|
||||
Based on [datastore.py](https://github.com/datastore/datastore).
|
||||
|
||||
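To make the datastore-agnostic idea above concrete, here is a small sketch (an editorial example, not part of this repository) that writes and reads through the generic `Datastore` interface using the in-memory `MapDatastore` from this package; any other implementation could stand behind the same variable without changing the calling code.

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
)

func main() {
	// Any Datastore implementation can stand behind this variable.
	var store ds.Datastore = ds.NewMapDatastore()

	key := ds.NewKey("/Comedy/MontyPython/Actor:JohnCleese")
	if err := store.Put(key, []byte("silly walk")); err != nil {
		panic(err)
	}

	value, err := store.Get(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s => %s\n", key, value)
}
```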
## Documentation
|
||||
|
||||
https://godoc.org/github.com/ipfs/go-datastore
|
||||
|
||||
## Contribute
|
||||
|
||||
Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/go-datastore/issues)!
|
||||
|
||||
This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
|
||||
|
||||
### Want to hack on IPFS?
|
||||
|
||||
[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md)
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
|
@ -0,0 +1,287 @@
|
|||
package datastore
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// Here are some basic datastore implementations.
|
||||
|
||||
// MapDatastore uses a standard Go map for internal storage.
|
||||
type MapDatastore struct {
|
||||
values map[Key][]byte
|
||||
}
|
||||
|
||||
// NewMapDatastore constructs a MapDatastore. It is _not_ thread-safe by
|
||||
// default, wrap using sync.MutexWrap if you need thread safety (the answer here
|
||||
// is usually yes).
|
||||
func NewMapDatastore() (d *MapDatastore) {
|
||||
return &MapDatastore{
|
||||
values: make(map[Key][]byte),
|
||||
}
|
||||
}
|
||||
|
||||
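As the comment above suggests, callers that share a MapDatastore across goroutines should wrap it. A minimal standalone sketch follows (editorial example, not part of this file); it assumes the sync wrapper that ships alongside this module at github.com/ipfs/go-datastore/sync.

package main

import (
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

func main() {
	// MutexWrap guards every call with a mutex, making the map-backed
	// store safe to share between goroutines.
	store := dssync.MutexWrap(ds.NewMapDatastore())
	if err := store.Put(ds.NewKey("/demo"), []byte("value")); err != nil {
		panic(err)
	}
}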
// Put implements Datastore.Put
|
||||
func (d *MapDatastore) Put(key Key, value []byte) (err error) {
|
||||
d.values[key] = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sync implements Datastore.Sync
|
||||
func (d *MapDatastore) Sync(prefix Key) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *MapDatastore) Get(key Key) (value []byte, err error) {
|
||||
val, found := d.values[key]
|
||||
if !found {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *MapDatastore) Has(key Key) (exists bool, err error) {
|
||||
_, found := d.values[key]
|
||||
return found, nil
|
||||
}
|
||||
|
||||
// GetSize implements Datastore.GetSize
|
||||
func (d *MapDatastore) GetSize(key Key) (size int, err error) {
|
||||
if v, found := d.values[key]; found {
|
||||
return len(v), nil
|
||||
}
|
||||
return -1, ErrNotFound
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *MapDatastore) Delete(key Key) (err error) {
|
||||
delete(d.values, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query implements Datastore.Query
|
||||
func (d *MapDatastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
re := make([]dsq.Entry, 0, len(d.values))
|
||||
for k, v := range d.values {
|
||||
e := dsq.Entry{Key: k.String(), Size: len(v)}
|
||||
if !q.KeysOnly {
|
||||
e.Value = v
|
||||
}
|
||||
re = append(re, e)
|
||||
}
|
||||
r := dsq.ResultsWithEntries(q, re)
|
||||
r = dsq.NaiveQueryApply(q, r)
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (d *MapDatastore) Batch() (Batch, error) {
|
||||
return NewBasicBatch(d), nil
|
||||
}
|
||||
|
||||
func (d *MapDatastore) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NullDatastore stores nothing, but conforms to the API.
|
||||
// Useful to test with.
|
||||
type NullDatastore struct {
|
||||
}
|
||||
|
||||
// NewNullDatastore constructs a null datastore
|
||||
func NewNullDatastore() *NullDatastore {
|
||||
return &NullDatastore{}
|
||||
}
|
||||
|
||||
// Put implements Datastore.Put
|
||||
func (d *NullDatastore) Put(key Key, value []byte) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sync implements Datastore.Sync
|
||||
func (d *NullDatastore) Sync(prefix Key) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *NullDatastore) Get(key Key) (value []byte, err error) {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *NullDatastore) Has(key Key) (exists bool, err error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// GetSize implements Datastore.GetSize
|
||||
func (d *NullDatastore) GetSize(key Key) (size int, err error) {
|
||||
return -1, ErrNotFound
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *NullDatastore) Delete(key Key) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query implements Datastore.Query
|
||||
func (d *NullDatastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
return dsq.ResultsWithEntries(q, nil), nil
|
||||
}
|
||||
|
||||
func (d *NullDatastore) Batch() (Batch, error) {
|
||||
return NewBasicBatch(d), nil
|
||||
}
|
||||
|
||||
func (d *NullDatastore) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// LogDatastore logs all accesses through the datastore.
|
||||
type LogDatastore struct {
|
||||
Name string
|
||||
child Datastore
|
||||
}
|
||||
|
||||
// Shim is a datastore which has a child.
|
||||
type Shim interface {
|
||||
Datastore
|
||||
|
||||
Children() []Datastore
|
||||
}
|
||||
|
||||
// NewLogDatastore constructs a log datastore.
|
||||
func NewLogDatastore(ds Datastore, name string) *LogDatastore {
|
||||
if len(name) < 1 {
|
||||
name = "LogDatastore"
|
||||
}
|
||||
return &LogDatastore{Name: name, child: ds}
|
||||
}
|
||||
|
||||
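A hedged usage sketch for the wrapper above (editorial example; the function name is hypothetical and not part of this file):

func exampleLogging() {
	// Every call on the wrapper is traced via the standard log package
	// before being forwarded to the child datastore.
	logged := NewLogDatastore(NewMapDatastore(), "peers")
	_ = logged.Put(NewKey("/demo"), []byte("value")) // logs "peers: Put /demo"
}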
// Children implements Shim
|
||||
func (d *LogDatastore) Children() []Datastore {
|
||||
return []Datastore{d.child}
|
||||
}
|
||||
|
||||
// Put implements Datastore.Put
|
||||
func (d *LogDatastore) Put(key Key, value []byte) (err error) {
|
||||
log.Printf("%s: Put %s\n", d.Name, key)
|
||||
// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
|
||||
return d.child.Put(key, value)
|
||||
}
|
||||
|
||||
// Sync implements Datastore.Sync
|
||||
func (d *LogDatastore) Sync(prefix Key) error {
|
||||
log.Printf("%s: Sync %s\n", d.Name, prefix)
|
||||
return d.child.Sync(prefix)
|
||||
}
|
||||
|
||||
// Get implements Datastore.Get
|
||||
func (d *LogDatastore) Get(key Key) (value []byte, err error) {
|
||||
log.Printf("%s: Get %s\n", d.Name, key)
|
||||
return d.child.Get(key)
|
||||
}
|
||||
|
||||
// Has implements Datastore.Has
|
||||
func (d *LogDatastore) Has(key Key) (exists bool, err error) {
|
||||
log.Printf("%s: Has %s\n", d.Name, key)
|
||||
return d.child.Has(key)
|
||||
}
|
||||
|
||||
// GetSize implements Datastore.GetSize
|
||||
func (d *LogDatastore) GetSize(key Key) (size int, err error) {
|
||||
log.Printf("%s: GetSize %s\n", d.Name, key)
|
||||
return d.child.GetSize(key)
|
||||
}
|
||||
|
||||
// Delete implements Datastore.Delete
|
||||
func (d *LogDatastore) Delete(key Key) (err error) {
|
||||
log.Printf("%s: Delete %s\n", d.Name, key)
|
||||
return d.child.Delete(key)
|
||||
}
|
||||
|
||||
// DiskUsage implements the PersistentDatastore interface.
|
||||
func (d *LogDatastore) DiskUsage() (uint64, error) {
|
||||
log.Printf("%s: DiskUsage\n", d.Name)
|
||||
return DiskUsage(d.child)
|
||||
}
|
||||
|
||||
// Query implements Datastore.Query
|
||||
func (d *LogDatastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
log.Printf("%s: Query\n", d.Name)
|
||||
log.Printf("%s: q.Prefix: %s\n", d.Name, q.Prefix)
|
||||
log.Printf("%s: q.KeysOnly: %v\n", d.Name, q.KeysOnly)
|
||||
log.Printf("%s: q.Filters: %d\n", d.Name, len(q.Filters))
|
||||
log.Printf("%s: q.Orders: %d\n", d.Name, len(q.Orders))
|
||||
log.Printf("%s: q.Offset: %d\n", d.Name, q.Offset)
|
||||
|
||||
return d.child.Query(q)
|
||||
}
|
||||
|
||||
// LogBatch logs all accesses through the batch.
|
||||
type LogBatch struct {
|
||||
Name string
|
||||
child Batch
|
||||
}
|
||||
|
||||
func (d *LogDatastore) Batch() (Batch, error) {
|
||||
log.Printf("%s: Batch\n", d.Name)
|
||||
if bds, ok := d.child.(Batching); ok {
|
||||
b, err := bds.Batch()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &LogBatch{
|
||||
Name: d.Name,
|
||||
child: b,
|
||||
}, nil
|
||||
}
|
||||
return nil, ErrBatchUnsupported
|
||||
}
|
||||
|
||||
// Put implements Batch.Put
|
||||
func (d *LogBatch) Put(key Key, value []byte) (err error) {
|
||||
log.Printf("%s: BatchPut %s\n", d.Name, key)
|
||||
// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
|
||||
return d.child.Put(key, value)
|
||||
}
|
||||
|
||||
// Delete implements Batch.Delete
|
||||
func (d *LogBatch) Delete(key Key) (err error) {
|
||||
log.Printf("%s: BatchDelete %s\n", d.Name, key)
|
||||
return d.child.Delete(key)
|
||||
}
|
||||
|
||||
// Commit implements Batch.Commit
|
||||
func (d *LogBatch) Commit() (err error) {
|
||||
log.Printf("%s: BatchCommit\n", d.Name)
|
||||
return d.child.Commit()
|
||||
}
|
||||
|
||||
func (d *LogDatastore) Close() error {
|
||||
log.Printf("%s: Close\n", d.Name)
|
||||
return d.child.Close()
|
||||
}
|
||||
|
||||
func (d *LogDatastore) Check() error {
|
||||
if c, ok := d.child.(CheckedDatastore); ok {
|
||||
return c.Check()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *LogDatastore) Scrub() error {
|
||||
if c, ok := d.child.(ScrubbedDatastore); ok {
|
||||
return c.Scrub()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *LogDatastore) CollectGarbage() error {
|
||||
if c, ok := d.child.(GCDatastore); ok {
|
||||
return c.CollectGarbage()
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
package datastore
|
||||
|
||||
type op struct {
|
||||
delete bool
|
||||
value []byte
|
||||
}
|
||||
|
||||
// basicBatch implements the transaction interface for datastores that do
|
||||
// not have any sort of underlying transactional support
|
||||
type basicBatch struct {
|
||||
ops map[Key]op
|
||||
|
||||
target Datastore
|
||||
}
|
||||
|
||||
func NewBasicBatch(ds Datastore) Batch {
|
||||
return &basicBatch{
|
||||
ops: make(map[Key]op),
|
||||
target: ds,
|
||||
}
|
||||
}
|
||||
|
||||
func (bt *basicBatch) Put(key Key, val []byte) error {
|
||||
bt.ops[key] = op{value: val}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bt *basicBatch) Delete(key Key) error {
|
||||
bt.ops[key] = op{delete: true}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bt *basicBatch) Commit() error {
|
||||
var err error
|
||||
for k, op := range bt.ops {
|
||||
if op.delete {
|
||||
err = bt.target.Delete(k)
|
||||
} else {
|
||||
err = bt.target.Put(k, op.value)
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
|
@ -0,0 +1,251 @@
|
|||
package datastore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
query "github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
/*
|
||||
Datastore represents storage for any key-value pair.
|
||||
|
||||
Datastores are general enough to be backed by all kinds of different storage:
|
||||
in-memory caches, databases, a remote datastore, flat files on disk, etc.
|
||||
|
||||
The general idea is to wrap a more complicated storage facility in a simple,
|
||||
uniform interface, keeping the freedom of using the right tools for the job.
|
||||
In particular, a Datastore can aggregate other datastores in interesting ways,
|
||||
like sharded (to distribute load) or tiered access (caches before databases).
|
||||
|
||||
While Datastores should be written general enough to accept all sorts of
|
||||
values, some implementations will undoubtedly have to be specific (e.g. SQL
|
||||
databases where fields should be decomposed into columns), particularly to
|
||||
support queries efficiently. Moreover, certain datastores may enforce certain
|
||||
types of values (e.g. requiring an io.Reader, a specific struct, etc) or
|
||||
serialization formats (JSON, Protobufs, etc).
|
||||
|
||||
IMPORTANT: No Datastore should ever Panic! This is a cross-module interface,
|
||||
and thus it should behave predictably and handle exceptional conditions with
|
||||
proper error reporting. Thus, all Datastore calls may return errors, which
|
||||
should be checked by callers.
|
||||
*/
|
||||
type Datastore interface {
|
||||
Read
|
||||
Write
|
||||
// Sync guarantees that any Put or Delete calls under prefix that returned
|
||||
// before Sync(prefix) was called will be observed after Sync(prefix)
|
||||
// returns, even if the program crashes. If Put/Delete operations already
|
||||
// satisfy these requirements then Sync may be a no-op.
|
||||
//
|
||||
// If the prefix fails to Sync this method returns an error.
|
||||
Sync(prefix Key) error
|
||||
io.Closer
|
||||
}
|
||||
|
||||
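A hedged usage sketch of the interface above (editorial example; exampleDatastoreUse is a hypothetical name, and only methods declared in this package are used):

func exampleDatastoreUse(d Datastore) error {
	k := NewKey("/example/entry")
	if err := d.Put(k, []byte("value")); err != nil {
		return err
	}
	// Ask the store to make everything written under /example durable.
	if err := d.Sync(NewKey("/example")); err != nil {
		return err
	}
	_, err := d.Get(k)
	return err
}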
// Write is the write-side of the Datastore interface.
|
||||
type Write interface {
|
||||
// Put stores the object `value` named by `key`.
|
||||
//
|
||||
// The generalized Datastore interface does not impose a value type,
|
||||
// allowing various datastore middleware implementations (which do not
|
||||
// handle the values directly) to be composed together.
|
||||
//
|
||||
// Ultimately, the lowest-level datastore will need to do some value checking
|
||||
// or risk getting incorrect values. It may also be useful to expose a more
|
||||
// type-safe interface to your application, and do the checking up-front.
|
||||
Put(key Key, value []byte) error
|
||||
|
||||
// Delete removes the value for given `key`. If the key is not in the
|
||||
// datastore, this method returns no error.
|
||||
Delete(key Key) error
|
||||
}
|
||||
|
||||
// Read is the read-side of the Datastore interface.
|
||||
type Read interface {
|
||||
// Get retrieves the object `value` named by `key`.
|
||||
// Get will return ErrNotFound if the key is not mapped to a value.
|
||||
Get(key Key) (value []byte, err error)
|
||||
|
||||
// Has returns whether the `key` is mapped to a `value`.
|
||||
// In some contexts, it may be much cheaper only to check for existence of
|
||||
// a value, rather than retrieving the value itself. (e.g. HTTP HEAD).
|
||||
// The default implementation is found in `GetBackedHas`.
|
||||
Has(key Key) (exists bool, err error)
|
||||
|
||||
// GetSize returns the size of the `value` named by `key`.
|
||||
// In some contexts, it may be much cheaper to only get the size of the
|
||||
// value rather than retrieving the value itself.
|
||||
GetSize(key Key) (size int, err error)
|
||||
|
||||
// Query searches the datastore and returns a query result. This function
|
||||
// may return before the query actually runs. To wait for the query:
|
||||
//
|
||||
// result, _ := ds.Query(q)
|
||||
//
|
||||
// // use the channel interface; result may come in at different times
|
||||
// for entry := range result.Next() { ... }
|
||||
//
|
||||
// // or wait for the query to be completely done
|
||||
// entries, _ := result.Rest()
|
||||
// for entry := range entries { ... }
|
||||
//
|
||||
Query(q query.Query) (query.Results, error)
|
||||
}
|
||||
|
||||
// Batching datastores support deferred, grouped updates to the database.
|
||||
// `Batch`es do NOT have transactional semantics: updates to the underlying
|
||||
// datastore are not guaranteed to occur in the same iota of time. Similarly,
|
||||
// batched updates will not be flushed to the underlying datastore until
|
||||
// `Commit` has been called. `Txn`s from a `TxnDatastore` have all the
|
||||
// capabilities of a `Batch`, but the reverse is NOT true.
|
||||
type Batching interface {
|
||||
Datastore
|
||||
|
||||
Batch() (Batch, error)
|
||||
}
|
||||
|
||||
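The batch semantics described above can be exercised as in this sketch (editorial example; exampleBatching is a hypothetical name):

func exampleBatching(d Datastore) error {
	bds, ok := d.(Batching)
	if !ok {
		return ErrBatchUnsupported
	}
	b, err := bds.Batch()
	if err != nil {
		return err
	}
	if err := b.Put(NewKey("/a"), []byte("1")); err != nil {
		return err
	}
	if err := b.Delete(NewKey("/old")); err != nil {
		return err
	}
	// Nothing above is guaranteed to reach the underlying datastore
	// until Commit is called.
	return b.Commit()
}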
// ErrBatchUnsupported is returned by Batch if the Datastore doesn't
|
||||
// actually support batching.
|
||||
var ErrBatchUnsupported = errors.New("this datastore does not support batching")
|
||||
|
||||
// CheckedDatastore is an interface that should be implemented by datastores
|
||||
// which may need checking on-disk data integrity.
|
||||
type CheckedDatastore interface {
|
||||
Datastore
|
||||
|
||||
Check() error
|
||||
}
|
||||
|
||||
// ScrubbedDatastore is an interface that should be implemented by datastores
|
||||
// which want to provide a mechanism to check data integrity and/or
|
||||
// error correction.
|
||||
type ScrubbedDatastore interface {
|
||||
Datastore
|
||||
|
||||
Scrub() error
|
||||
}
|
||||
|
||||
// GCDatastore is an interface that should be implemented by datastores which
|
||||
// don't free disk space by just removing data from them.
|
||||
type GCDatastore interface {
|
||||
Datastore
|
||||
|
||||
CollectGarbage() error
|
||||
}
|
||||
|
||||
// PersistentDatastore is an interface that should be implemented by datastores
|
||||
// which can report disk usage.
|
||||
type PersistentDatastore interface {
|
||||
Datastore
|
||||
|
||||
// DiskUsage returns the space used by a datastore, in bytes.
|
||||
DiskUsage() (uint64, error)
|
||||
}
|
||||
|
||||
// DiskUsage checks if a Datastore is a
|
||||
// PersistentDatastore and returns its DiskUsage(),
|
||||
// otherwise returns 0.
|
||||
func DiskUsage(d Datastore) (uint64, error) {
|
||||
persDs, ok := d.(PersistentDatastore)
|
||||
if !ok {
|
||||
return 0, nil
|
||||
}
|
||||
return persDs.DiskUsage()
|
||||
}
|
||||
|
||||
// TTLDatastore is an interface that should be implemented by datastores that
|
||||
// support expiring entries.
|
||||
type TTLDatastore interface {
|
||||
Datastore
|
||||
TTL
|
||||
}
|
||||
|
||||
// TTL encapsulates the methods that deal with entries with time-to-live.
|
||||
type TTL interface {
|
||||
PutWithTTL(key Key, value []byte, ttl time.Duration) error
|
||||
SetTTL(key Key, ttl time.Duration) error
|
||||
GetExpiration(key Key) (time.Time, error)
|
||||
}
|
||||
|
||||
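A sketch of the TTL methods above, assuming a TTL-capable backend (editorial example; the function name is hypothetical):

func exampleTTL(d Datastore) error {
	tds, ok := d.(TTLDatastore)
	if !ok {
		return nil // backend has no native expiry support
	}
	key := NewKey("/session/demo")
	// Store the entry and let the backend expire it after one hour.
	if err := tds.PutWithTTL(key, []byte("token"), time.Hour); err != nil {
		return err
	}
	_, err := tds.GetExpiration(key)
	return err
}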
// Txn extends the Datastore type. Txns allow users to batch queries and
|
||||
// mutations to the Datastore into atomic groups, or transactions. Actions
|
||||
// performed on a transaction will not take hold until a successful call to
|
||||
// Commit has been made. Likewise, transactions can be aborted by calling
|
||||
// Discard before a successful Commit has been made.
|
||||
type Txn interface {
|
||||
Read
|
||||
Write
|
||||
|
||||
// Commit finalizes a transaction, attempting to commit it to the Datastore.
|
||||
// May return an error if the transaction has gone stale. The presence of an
|
||||
// error is an indication that the data was not committed to the Datastore.
|
||||
Commit() error
|
||||
// Discard throws away changes recorded in a transaction without committing
|
||||
// them to the underlying Datastore. Any calls made to Discard after Commit
|
||||
// has been successfully called will have no effect on the transaction and
|
||||
// state of the Datastore, making it safe to defer.
|
||||
Discard()
|
||||
}
|
||||
|
||||
// TxnDatastore is an interface that should be implemented by datastores that
|
||||
// support transactions.
|
||||
type TxnDatastore interface {
|
||||
Datastore
|
||||
|
||||
NewTransaction(readOnly bool) (Txn, error)
|
||||
}
|
||||
|
||||
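A sketch of the transaction flow described above (editorial example; the function name is hypothetical):

func exampleTxn(d Datastore) error {
	tds, ok := d.(TxnDatastore)
	if !ok {
		return nil // backend does not support transactions
	}
	txn, err := tds.NewTransaction(false) // false = read-write
	if err != nil {
		return err
	}
	// Discard is safe to defer even after a successful Commit.
	defer txn.Discard()
	if err := txn.Put(NewKey("/a"), []byte("1")); err != nil {
		return err
	}
	return txn.Commit()
}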
// Errors
|
||||
|
||||
type dsError struct {
|
||||
error
|
||||
isNotFound bool
|
||||
}
|
||||
|
||||
func (e *dsError) NotFound() bool {
|
||||
return e.isNotFound
|
||||
}
|
||||
|
||||
// ErrNotFound is returned by Get and GetSize when a datastore does not map the
|
||||
// given key to a value.
|
||||
var ErrNotFound error = &dsError{error: errors.New("datastore: key not found"), isNotFound: true}
|
||||
|
||||
// GetBackedHas provides a default Datastore.Has implementation.
|
||||
// It exists so Datastore.Has implementations can use it, like so:
|
||||
//
|
||||
// func (d *SomeDatastore) Has(key Key) (exists bool, err error) {
|
||||
// return GetBackedHas(d, key)
|
||||
// }
|
||||
func GetBackedHas(ds Read, key Key) (bool, error) {
|
||||
_, err := ds.Get(key)
|
||||
switch err {
|
||||
case nil:
|
||||
return true, nil
|
||||
case ErrNotFound:
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// GetBackedSize provides a default Datastore.GetSize implementation.
|
||||
// It exists so Datastore.GetSize implementations can use it, like so:
|
||||
//
|
||||
// func (d *SomeDatastore) GetSize(key Key) (size int, err error) {
|
||||
// return GetBackedSize(d, key)
|
||||
// }
|
||||
func GetBackedSize(ds Read, key Key) (int, error) {
|
||||
value, err := ds.Get(key)
|
||||
if err == nil {
|
||||
return len(value), nil
|
||||
}
|
||||
return -1, err
|
||||
}
|
||||
|
||||
type Batch interface {
|
||||
Write
|
||||
|
||||
Commit() error
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
module github.com/ipfs/go-datastore
|
||||
|
||||
require (
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8
|
||||
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15
|
||||
)
|
||||
|
||||
go 1.12
|
|
@ -0,0 +1,16 @@
|
|||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8 h1:NAviDvJ0WXgD+yiL2Rj35AmnfgI11+pHXbdciD917U0=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
|
||||
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw=
|
||||
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
@ -0,0 +1,309 @@
|
|||
package datastore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
/*
|
||||
A Key represents the unique identifier of an object.
|
||||
Our Key scheme is inspired by file systems and Google App Engine key model.
|
||||
|
||||
Keys are meant to be unique across a system. Keys are hierarchical,
|
||||
incorporating more and more specific namespaces. Thus keys can be deemed
|
||||
'children' or 'ancestors' of other keys::
|
||||
|
||||
Key("/Comedy")
|
||||
Key("/Comedy/MontyPython")
|
||||
|
||||
Also, every namespace can be parametrized to embed relevant object
|
||||
information. For example, the Key `name` (most specific namespace) could
|
||||
include the object type::
|
||||
|
||||
Key("/Comedy/MontyPython/Actor:JohnCleese")
|
||||
Key("/Comedy/MontyPython/Sketch:CheeseShop")
|
||||
Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender")
|
||||
|
||||
*/
|
||||
type Key struct {
|
||||
string
|
||||
}
|
||||
|
||||
// NewKey constructs a key from a string. It will clean the value.
|
||||
func NewKey(s string) Key {
|
||||
k := Key{s}
|
||||
k.Clean()
|
||||
return k
|
||||
}
|
||||
|
||||
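A short sketch of the hierarchy described in the comment at the top of this file (editorial example; the function name is hypothetical, and each helper is defined further down in this file):

func exampleKeys() {
	k := NewKey("/Comedy/MontyPython/Actor:JohnCleese")
	_ = k.Parent()                        // NewKey("/Comedy/MontyPython")
	_ = k.BaseNamespace()                 // "Actor:JohnCleese"
	_ = k.Type()                          // "Actor"
	_ = k.Name()                          // "JohnCleese"
	_ = NewKey("/Comedy").IsAncestorOf(k) // true
}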
// RawKey creates a new Key without safety checking the input. Use with care.
|
||||
func RawKey(s string) Key {
|
||||
// accept an empty string and fix it to avoid special cases
|
||||
// elsewhere
|
||||
if len(s) == 0 {
|
||||
return Key{"/"}
|
||||
}
|
||||
|
||||
// perform a quick sanity check that the key is in the correct
|
||||
// format, if it is not then it is a programmer error and it is
|
||||
// okay to panic
|
||||
if len(s) == 0 || s[0] != '/' || (len(s) > 1 && s[len(s)-1] == '/') {
|
||||
panic("invalid datastore key: " + s)
|
||||
}
|
||||
|
||||
return Key{s}
|
||||
}
|
||||
|
||||
// KeyWithNamespaces constructs a key out of a namespace slice.
|
||||
func KeyWithNamespaces(ns []string) Key {
|
||||
return NewKey(strings.Join(ns, "/"))
|
||||
}
|
||||
|
||||
// Clean up a Key, using path.Clean.
|
||||
func (k *Key) Clean() {
|
||||
switch {
|
||||
case len(k.string) == 0:
|
||||
k.string = "/"
|
||||
case k.string[0] == '/':
|
||||
k.string = path.Clean(k.string)
|
||||
default:
|
||||
k.string = path.Clean("/" + k.string)
|
||||
}
|
||||
}
|
||||
|
||||
// String is the string value of Key
|
||||
func (k Key) String() string {
|
||||
return k.string
|
||||
}
|
||||
|
||||
// Bytes returns the string value of Key as a []byte
|
||||
func (k Key) Bytes() []byte {
|
||||
return []byte(k.string)
|
||||
}
|
||||
|
||||
// Equal checks equality of two keys
|
||||
func (k Key) Equal(k2 Key) bool {
|
||||
return k.string == k2.string
|
||||
}
|
||||
|
||||
// Less checks whether this key is sorted lower than another.
|
||||
func (k Key) Less(k2 Key) bool {
|
||||
list1 := k.List()
|
||||
list2 := k2.List()
|
||||
for i, c1 := range list1 {
|
||||
if len(list2) < (i + 1) {
|
||||
return false
|
||||
}
|
||||
|
||||
c2 := list2[i]
|
||||
if c1 < c2 {
|
||||
return true
|
||||
} else if c1 > c2 {
|
||||
return false
|
||||
}
|
||||
// c1 == c2, continue
|
||||
}
|
||||
|
||||
// list1 is shorter or exactly the same.
|
||||
return len(list1) < len(list2)
|
||||
}
|
||||
|
||||
// List returns the `list` representation of this Key.
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
|
||||
// ["Comedy", "MontyPythong", "Actor:JohnCleese"]
|
||||
func (k Key) List() []string {
|
||||
return strings.Split(k.string, "/")[1:]
|
||||
}
|
||||
|
||||
// Reverse returns the reverse of this Key.
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse()
|
||||
// NewKey("/Actor:JohnCleese/MontyPython/Comedy")
|
||||
func (k Key) Reverse() Key {
|
||||
l := k.List()
|
||||
r := make([]string, len(l))
|
||||
for i, e := range l {
|
||||
r[len(l)-i-1] = e
|
||||
}
|
||||
return KeyWithNamespaces(r)
|
||||
}
|
||||
|
||||
// Namespaces returns the `namespaces` making up this Key.
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Namespaces()
|
||||
// ["Comedy", "MontyPython", "Actor:JohnCleese"]
|
||||
func (k Key) Namespaces() []string {
|
||||
return k.List()
|
||||
}
|
||||
|
||||
// BaseNamespace returns the "base" namespace of this key (path.Base(filename))
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace()
|
||||
// "Actor:JohnCleese"
|
||||
func (k Key) BaseNamespace() string {
|
||||
n := k.Namespaces()
|
||||
return n[len(n)-1]
|
||||
}
|
||||
|
||||
// Type returns the "type" of this key (value of last namespace).
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Type()
|
||||
// "Actor"
|
||||
func (k Key) Type() string {
|
||||
return NamespaceType(k.BaseNamespace())
|
||||
}
|
||||
|
||||
// Name returns the "name" of this key (field of last namespace).
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Name()
|
||||
// "JohnCleese"
|
||||
func (k Key) Name() string {
|
||||
return NamespaceValue(k.BaseNamespace())
|
||||
}
|
||||
|
||||
// Instance returns an "instance" of this type key (appends value to namespace).
|
||||
// NewKey("/Comedy/MontyPython/Actor").Instance("JohnClesse")
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
|
||||
func (k Key) Instance(s string) Key {
|
||||
return NewKey(k.string + ":" + s)
|
||||
}
|
||||
|
||||
// Path returns the "path" of this key (parent + type).
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path()
|
||||
// NewKey("/Comedy/MontyPython/Actor")
|
||||
func (k Key) Path() Key {
|
||||
s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace())
|
||||
return NewKey(s)
|
||||
}
|
||||
|
||||
// Parent returns the `parent` Key of this Key.
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent()
|
||||
// NewKey("/Comedy/MontyPython")
|
||||
func (k Key) Parent() Key {
|
||||
n := k.List()
|
||||
if len(n) == 1 {
|
||||
return RawKey("/")
|
||||
}
|
||||
return NewKey(strings.Join(n[:len(n)-1], "/"))
|
||||
}
|
||||
|
||||
// Child returns the `child` Key of this Key.
|
||||
// NewKey("/Comedy/MontyPython").Child(NewKey("Actor:JohnCleese"))
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
|
||||
func (k Key) Child(k2 Key) Key {
|
||||
switch {
|
||||
case k.string == "/":
|
||||
return k2
|
||||
case k2.string == "/":
|
||||
return k
|
||||
default:
|
||||
return RawKey(k.string + k2.string)
|
||||
}
|
||||
}
|
||||
|
||||
// ChildString returns the `child` Key of this Key -- string helper.
|
||||
// NewKey("/Comedy/MontyPython").ChildString("Actor:JohnCleese")
|
||||
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
|
||||
func (k Key) ChildString(s string) Key {
|
||||
return NewKey(k.string + "/" + s)
|
||||
}
|
||||
|
||||
// IsAncestorOf returns whether this key is a prefix of `other`
|
||||
// NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython")
|
||||
// true
|
||||
func (k Key) IsAncestorOf(other Key) bool {
|
||||
// equivalent to HasPrefix(other, k.string + "/")
|
||||
|
||||
if len(other.string) <= len(k.string) {
|
||||
// We're not long enough to be a child.
|
||||
return false
|
||||
}
|
||||
|
||||
if k.string == "/" {
|
||||
// We're the root and the other key is longer.
|
||||
return true
|
||||
}
|
||||
|
||||
// "other" starts with /k.string/
|
||||
return other.string[len(k.string)] == '/' && other.string[:len(k.string)] == k.string
|
||||
}
|
||||
|
||||
// IsDescendantOf returns whether this key contains another as a prefix.
|
||||
// NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy")
|
||||
// true
|
||||
func (k Key) IsDescendantOf(other Key) bool {
|
||||
return other.IsAncestorOf(k)
|
||||
}
|
||||
|
||||
// IsTopLevel returns whether this key has only one namespace.
|
||||
func (k Key) IsTopLevel() bool {
|
||||
return len(k.List()) == 1
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface,
|
||||
// keys are represented as JSON strings
|
||||
func (k Key) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(k.String())
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface,
|
||||
// keys are parsed from their JSON string representation
|
||||
func (k *Key) UnmarshalJSON(data []byte) error {
|
||||
var key string
|
||||
if err := json.Unmarshal(data, &key); err != nil {
|
||||
return err
|
||||
}
|
||||
*k = NewKey(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RandomKey returns a randomly (uuid) generated key.
|
||||
// RandomKey()
|
||||
// NewKey("/f98719ea086343f7b71f32ea9d9d521d")
|
||||
func RandomKey() Key {
|
||||
return NewKey(strings.Replace(uuid.New().String(), "-", "", -1))
|
||||
}
|
||||
|
||||
/*
|
||||
A Key Namespace is like a path element.
|
||||
A namespace can optionally include a type (delimited by ':')
|
||||
|
||||
> NamespaceValue("Song:PhilosopherSong")
|
||||
PhilosopherSong
|
||||
> NamespaceType("Song:PhilosopherSong")
|
||||
Song
|
||||
> NamespaceType("Music:Song:PhilosopherSong")
|
||||
Music:Song
|
||||
*/
|
||||
|
||||
// NamespaceType is the first component of a namespace. `foo` in `foo:bar`
|
||||
func NamespaceType(namespace string) string {
|
||||
parts := strings.Split(namespace, ":")
|
||||
if len(parts) < 2 {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(parts[0:len(parts)-1], ":")
|
||||
}
|
||||
|
||||
// NamespaceValue returns the last component of a namespace. `baz` in `f:b:baz`
|
||||
func NamespaceValue(namespace string) string {
|
||||
parts := strings.Split(namespace, ":")
|
||||
return parts[len(parts)-1]
|
||||
}
|
||||
|
||||
// KeySlice attaches the methods of sort.Interface to []Key,
|
||||
// sorting in increasing order.
|
||||
type KeySlice []Key
|
||||
|
||||
func (p KeySlice) Len() int { return len(p) }
|
||||
func (p KeySlice) Less(i, j int) bool { return p[i].Less(p[j]) }
|
||||
func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// EntryKeys
|
||||
func EntryKeys(e []dsq.Entry) []Key {
|
||||
ks := make([]Key, len(e))
|
||||
for i, e := range e {
|
||||
ks[i] = NewKey(e.Key)
|
||||
}
|
||||
return ks
|
||||
}
|
|
@ -0,0 +1,102 @@
|
|||
package query
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Filter is an object that tests ResultEntries
|
||||
type Filter interface {
|
||||
// Filter returns whether an entry passes the filter
|
||||
Filter(e Entry) bool
|
||||
}
|
||||
|
||||
// Op is a comparison operator
|
||||
type Op string
|
||||
|
||||
var (
|
||||
Equal = Op("==")
|
||||
NotEqual = Op("!=")
|
||||
GreaterThan = Op(">")
|
||||
GreaterThanOrEqual = Op(">=")
|
||||
LessThan = Op("<")
|
||||
LessThanOrEqual = Op("<=")
|
||||
)
|
||||
|
||||
// FilterValueCompare is used to signal to datastores they
|
||||
// should apply internal comparisons. Unfortunately, there
|
||||
// is no way to apply comparisons* to interface{} types in
|
||||
// Go, so if the datastore doesn't have a special way to
|
||||
// handle these comparisons, you must provide the
|
||||
// TypedFilter to actually do filtering.
|
||||
//
|
||||
// [*] other than == and !=, which use reflect.DeepEqual.
|
||||
type FilterValueCompare struct {
|
||||
Op Op
|
||||
Value []byte
|
||||
}
|
||||
|
||||
func (f FilterValueCompare) Filter(e Entry) bool {
|
||||
cmp := bytes.Compare(e.Value, f.Value)
|
||||
switch f.Op {
|
||||
case Equal:
|
||||
return cmp == 0
|
||||
case NotEqual:
|
||||
return cmp != 0
|
||||
case LessThan:
|
||||
return cmp < 0
|
||||
case LessThanOrEqual:
|
||||
return cmp <= 0
|
||||
case GreaterThan:
|
||||
return cmp > 0
|
||||
case GreaterThanOrEqual:
|
||||
return cmp >= 0
|
||||
default:
|
||||
panic(fmt.Errorf("unknown operation: %s", f.Op))
|
||||
}
|
||||
}
|
||||
|
||||
func (f FilterValueCompare) String() string {
|
||||
return fmt.Sprintf("VALUE %s %q", f.Op, string(f.Value))
|
||||
}
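// Illustrative usage (a sketch; the entries and pivot value are hypothetical):
//
//	f := FilterValueCompare{Op: GreaterThan, Value: []byte("m")}
//	f.Filter(Entry{Value: []byte("z")}) // true:  bytes.Compare("z", "m") > 0
//	f.Filter(Entry{Value: []byte("a")}) // false: bytes.Compare("a", "m") < 0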
|
||||
|
||||
type FilterKeyCompare struct {
|
||||
Op Op
|
||||
Key string
|
||||
}
|
||||
|
||||
func (f FilterKeyCompare) Filter(e Entry) bool {
|
||||
switch f.Op {
|
||||
case Equal:
|
||||
return e.Key == f.Key
|
||||
case NotEqual:
|
||||
return e.Key != f.Key
|
||||
case GreaterThan:
|
||||
return e.Key > f.Key
|
||||
case GreaterThanOrEqual:
|
||||
return e.Key >= f.Key
|
||||
case LessThan:
|
||||
return e.Key < f.Key
|
||||
case LessThanOrEqual:
|
||||
return e.Key <= f.Key
|
||||
default:
|
||||
panic(fmt.Errorf("unknown op '%s'", f.Op))
|
||||
}
|
||||
}
|
||||
|
||||
func (f FilterKeyCompare) String() string {
|
||||
return fmt.Sprintf("KEY %s %q", f.Op, f.Key)
|
||||
}
|
||||
|
||||
type FilterKeyPrefix struct {
|
||||
Prefix string
|
||||
}
|
||||
|
||||
func (f FilterKeyPrefix) Filter(e Entry) bool {
|
||||
return strings.HasPrefix(e.Key, f.Prefix)
|
||||
}
|
||||
|
||||
func (f FilterKeyPrefix) String() string {
|
||||
return fmt.Sprintf("PREFIX(%q)", f.Prefix)
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
package query
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Order is an object used to order objects
|
||||
type Order interface {
|
||||
Compare(a, b Entry) int
|
||||
}
|
||||
|
||||
// OrderByFunction orders the results based on the result of the given function.
|
||||
type OrderByFunction func(a, b Entry) int
|
||||
|
||||
func (o OrderByFunction) Compare(a, b Entry) int {
|
||||
return o(a, b)
|
||||
}
|
||||
|
||||
func (OrderByFunction) String() string {
|
||||
return "FN"
|
||||
}
|
||||
|
||||
// OrderByValue is used to signal to datastores they should apply internal
|
||||
// orderings.
|
||||
type OrderByValue struct{}
|
||||
|
||||
func (o OrderByValue) Compare(a, b Entry) int {
|
||||
return bytes.Compare(a.Value, b.Value)
|
||||
}
|
||||
|
||||
func (OrderByValue) String() string {
|
||||
return "VALUE"
|
||||
}
|
||||
|
||||
// OrderByValueDescending is used to signal to datastores they
|
||||
// should apply internal orderings.
|
||||
type OrderByValueDescending struct{}
|
||||
|
||||
func (o OrderByValueDescending) Compare(a, b Entry) int {
|
||||
return -bytes.Compare(a.Value, b.Value)
|
||||
}
|
||||
|
||||
func (OrderByValueDescending) String() string {
|
||||
return "desc(VALUE)"
|
||||
}
|
||||
|
||||
// OrderByKey orders results by key, ascending.
|
||||
type OrderByKey struct{}
|
||||
|
||||
func (o OrderByKey) Compare(a, b Entry) int {
|
||||
return strings.Compare(a.Key, b.Key)
|
||||
}
|
||||
|
||||
func (OrderByKey) String() string {
|
||||
return "KEY"
|
||||
}
|
||||
|
||||
// OrderByKeyDescending orders results by key, descending.
|
||||
type OrderByKeyDescending struct{}
|
||||
|
||||
func (o OrderByKeyDescending) Compare(a, b Entry) int {
|
||||
return -strings.Compare(a.Key, b.Key)
|
||||
}
|
||||
|
||||
func (OrderByKeyDescending) String() string {
|
||||
return "desc(KEY)"
|
||||
}
|
||||
|
||||
// Less returns true if a comes before b with the requested orderings.
|
||||
func Less(orders []Order, a, b Entry) bool {
|
||||
for _, cmp := range orders {
|
||||
switch cmp.Compare(a, b) {
|
||||
case 0:
|
||||
case -1:
|
||||
return true
|
||||
case 1:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// This gives us a *stable* sort for free. We don't care
|
||||
// about preserving the order from the underlying datastore
|
||||
// because it's undefined.
|
||||
return a.Key < b.Key
|
||||
}
|
||||
|
||||
// Sort sorts the given entries using the given orders.
|
||||
func Sort(orders []Order, entries []Entry) {
|
||||
sort.Slice(entries, func(i int, j int) bool {
|
||||
return Less(orders, entries[i], entries[j])
|
||||
})
|
||||
}
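// Illustrative usage (a sketch; the keys are hypothetical):
//
//	entries := []Entry{{Key: "/b"}, {Key: "/a"}}
//	Sort([]Order{OrderByKey{}}, entries)
//	// entries is now ordered: "/a", "/b"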
|
|
@ -0,0 +1,426 @@
|
|||
package query
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
goprocess "github.com/jbenet/goprocess"
|
||||
)
|
||||
|
||||
/*
|
||||
Query represents storage for any key-value pair.
|
||||
|
||||
tl;dr:
|
||||
|
||||
queries are supported across datastores.
|
||||
Cheap on top of relational dbs, and expensive otherwise.
|
||||
Pick the right tool for the job!
|
||||
|
||||
In addition to the key-value store get and set semantics, datastore
|
||||
provides an interface to retrieve multiple records at a time through
|
||||
the use of queries. The datastore Query model gleans a common set of
|
||||
operations performed when querying. To avoid pasting here years of
|
||||
database research, let’s summarize the operations datastore supports.
|
||||
|
||||
Query Operations, applied in-order:
|
||||
|
||||
* prefix - scope the query to a given path prefix
|
||||
* filters - select a subset of values by applying constraints
|
||||
* orders - sort the results by applying sort conditions, hierarchically.
|
||||
* offset - skip a number of results (for efficient pagination)
|
||||
* limit - impose a numeric limit on the number of results
|
||||
|
||||
Datastore combines these operations into a simple Query class that allows
|
||||
applications to define their constraints in a simple, generic, way without
|
||||
introducing datastore specific calls, languages, etc.
|
||||
|
||||
However, take heed: not all datastores support efficiently performing these
|
||||
operations. Pick a datastore based on your needs. If you need efficient look-ups,
|
||||
go for a simple key/value store. If you need efficient queries, consider an SQL
|
||||
backed datastore.
|
||||
|
||||
Notes:
|
||||
|
||||
* Prefix: When a query filters by prefix, it selects keys that are strict
|
||||
children of the prefix. For example, a prefix "/foo" would select "/foo/bar"
|
||||
but not "/foobar" or "/foo".
|
||||
* Orders: Orders are applied hierarchically. Results are sorted by the first
|
||||
ordering, then entries equal under the first ordering are sorted with the
|
||||
second ordering, etc.
|
||||
* Limits & Offset: Limits and offsets are applied after everything else.
|
||||
*/
|
||||
type Query struct {
|
||||
Prefix string // namespaces the query to results whose keys have Prefix
|
||||
Filters []Filter // filter results. apply sequentially
|
||||
Orders []Order // order results. apply hierarchically
|
||||
Limit int // maximum number of results
|
||||
Offset int // skip given number of results
|
||||
KeysOnly bool // return only keys.
|
||||
ReturnExpirations bool // return expirations (see TTLDatastore)
|
||||
ReturnsSizes bool // always return sizes. If not set, datastore impl can return
|
||||
// // it anyway if it doesn't involve a performance cost. If KeysOnly
|
||||
// // is not set, Size should always be set.
|
||||
}
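// Illustrative construction (a sketch; the prefix "/example" is hypothetical):
// select values under a prefix, ordered by key, returning at most 10 results
// after skipping the first 5.
//
//	q := Query{
//		Prefix: "/example",
//		Orders: []Order{OrderByKey{}},
//		Offset: 5,
//		Limit:  10,
//	}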
|
||||
|
||||
// String returns a string representation of the Query for debugging/validation
|
||||
// purposes. Do not use it for SQL queries.
|
||||
func (q Query) String() string {
|
||||
s := "SELECT keys"
|
||||
if !q.KeysOnly {
|
||||
s += ",vals"
|
||||
}
|
||||
if q.ReturnExpirations {
|
||||
s += ",exps"
|
||||
}
|
||||
|
||||
s += " "
|
||||
|
||||
if q.Prefix != "" {
|
||||
s += fmt.Sprintf("FROM %q ", q.Prefix)
|
||||
}
|
||||
|
||||
if len(q.Filters) > 0 {
|
||||
s += fmt.Sprintf("FILTER [%s", q.Filters[0])
|
||||
for _, f := range q.Filters[1:] {
|
||||
s += fmt.Sprintf(", %s", f)
|
||||
}
|
||||
s += "] "
|
||||
}
|
||||
|
||||
if len(q.Orders) > 0 {
|
||||
s += fmt.Sprintf("ORDER [%s", q.Orders[0])
|
||||
for _, f := range q.Orders[1:] {
|
||||
s += fmt.Sprintf(", %s", f)
|
||||
}
|
||||
s += "] "
|
||||
}
|
||||
|
||||
if q.Offset > 0 {
|
||||
s += fmt.Sprintf("OFFSET %d ", q.Offset)
|
||||
}
|
||||
|
||||
if q.Limit > 0 {
|
||||
s += fmt.Sprintf("LIMIT %d ", q.Limit)
|
||||
}
|
||||
// Will always end with a space, strip it.
|
||||
return s[:len(s)-1]
|
||||
}
|
||||
|
||||
// Entry is a query result entry.
|
||||
type Entry struct {
|
||||
Key string // can't be ds.Key because of circular imports ...!!!
|
||||
Value []byte // Will be nil if KeysOnly has been passed.
|
||||
Expiration time.Time // Entry expiration timestamp if requested and supported (see TTLDatastore).
|
||||
Size int // Might be -1 if the datastore doesn't support listing the size with KeysOnly
|
||||
// // or if ReturnsSizes is not set
|
||||
}
|
||||
|
||||
// Result is a special entry that includes an error, so that the client
|
||||
// may be warned about internal errors. If Error is non-nil, Entry must be
|
||||
// empty.
|
||||
type Result struct {
|
||||
Entry
|
||||
|
||||
Error error
|
||||
}
|
||||
|
||||
// Results is a set of Query results. This is the interface for clients.
|
||||
// Example:
|
||||
//
|
||||
// qr, _ := myds.Query(q)
|
||||
// for r := range qr.Next() {
|
||||
// if r.Error != nil {
|
||||
// // handle.
|
||||
// break
|
||||
// }
|
||||
//
|
||||
// fmt.Println(r.Entry.Key, r.Entry.Value)
|
||||
// }
|
||||
//
|
||||
// or, wait on all results at once:
|
||||
//
|
||||
// qr, _ := myds.Query(q)
|
||||
// es, _ := qr.Rest()
|
||||
// for _, e := range es {
|
||||
// fmt.Println(e.Key, e.Value)
|
||||
// }
|
||||
//
|
||||
type Results interface {
|
||||
Query() Query // the query these Results correspond to
|
||||
Next() <-chan Result // returns a channel to wait for the next result
|
||||
NextSync() (Result, bool) // blocks and waits to return the next result, second parameter returns false when results are exhausted
|
||||
Rest() ([]Entry, error) // waits till processing finishes, returns all entries at once.
|
||||
Close() error // client may call Close to signal early exit
|
||||
|
||||
// Process returns a goprocess.Process associated with these results.
|
||||
// most users will not need this function (Close is all they want),
|
||||
// but it's here in case you want to connect the results to other
|
||||
// goprocess-friendly things.
|
||||
Process() goprocess.Process
|
||||
}
|
||||
|
||||
// results implements Results
|
||||
type results struct {
|
||||
query Query
|
||||
proc goprocess.Process
|
||||
res <-chan Result
|
||||
}
|
||||
|
||||
func (r *results) Next() <-chan Result {
|
||||
return r.res
|
||||
}
|
||||
|
||||
func (r *results) NextSync() (Result, bool) {
|
||||
val, ok := <-r.res
|
||||
return val, ok
|
||||
}
|
||||
|
||||
func (r *results) Rest() ([]Entry, error) {
|
||||
var es []Entry
|
||||
for e := range r.res {
|
||||
if e.Error != nil {
|
||||
return es, e.Error
|
||||
}
|
||||
es = append(es, e.Entry)
|
||||
}
|
||||
<-r.proc.Closed() // wait till the processing finishes.
|
||||
return es, nil
|
||||
}
|
||||
|
||||
func (r *results) Process() goprocess.Process {
|
||||
return r.proc
|
||||
}
|
||||
|
||||
func (r *results) Close() error {
|
||||
return r.proc.Close()
|
||||
}
|
||||
|
||||
func (r *results) Query() Query {
|
||||
return r.query
|
||||
}
|
||||
|
||||
// ResultBuilder is what implementors use to construct results
|
||||
// Implementors of datastores and their clients must respect the
|
||||
// Process of the Request:
|
||||
//
|
||||
// * clients must call r.Process().Close() on an early exit, so
|
||||
// implementations can reclaim resources.
|
||||
// * if the Entries are read to completion (channel closed), Process
|
||||
// should be closed automatically.
|
||||
// * datastores must respect <-Process.Closing(), which intermediates
|
||||
// an early close signal from the client.
|
||||
//
|
||||
type ResultBuilder struct {
|
||||
Query Query
|
||||
Process goprocess.Process
|
||||
Output chan Result
|
||||
}
|
||||
|
||||
// Results returns a Results object backed by this builder.
|
||||
func (rb *ResultBuilder) Results() Results {
|
||||
return &results{
|
||||
query: rb.Query,
|
||||
proc: rb.Process,
|
||||
res: rb.Output,
|
||||
}
|
||||
}
|
||||
|
||||
const NormalBufSize = 1
|
||||
const KeysOnlyBufSize = 128
|
||||
|
||||
func NewResultBuilder(q Query) *ResultBuilder {
|
||||
bufSize := NormalBufSize
|
||||
if q.KeysOnly {
|
||||
bufSize = KeysOnlyBufSize
|
||||
}
|
||||
b := &ResultBuilder{
|
||||
Query: q,
|
||||
Output: make(chan Result, bufSize),
|
||||
}
|
||||
b.Process = goprocess.WithTeardown(func() error {
|
||||
close(b.Output)
|
||||
return nil
|
||||
})
|
||||
return b
|
||||
}
|
||||
|
||||
// ResultsWithChan returns a Results object from a channel
|
||||
// of Result entries.
|
||||
//
|
||||
// DEPRECATED: This iterator is impossible to cancel correctly. Canceling it
|
||||
// will leave anything trying to write to the result channel hanging.
|
||||
func ResultsWithChan(q Query, res <-chan Result) Results {
|
||||
return ResultsWithProcess(q, func(worker goprocess.Process, out chan<- Result) {
|
||||
for {
|
||||
select {
|
||||
case <-worker.Closing(): // client told us to close early
|
||||
return
|
||||
case e, more := <-res:
|
||||
if !more {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case out <- e:
|
||||
case <-worker.Closing(): // client told us to close early
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// ResultsWithProcess returns a Results object with the results generated by the
|
||||
// passed subprocess.
|
||||
func ResultsWithProcess(q Query, proc func(goprocess.Process, chan<- Result)) Results {
|
||||
b := NewResultBuilder(q)
|
||||
|
||||
// go consume all the entries and add them to the results.
|
||||
b.Process.Go(func(worker goprocess.Process) {
|
||||
proc(worker, b.Output)
|
||||
})
|
||||
|
||||
go b.Process.CloseAfterChildren() //nolint
|
||||
return b.Results()
|
||||
}
|
||||
|
||||
// ResultsWithEntries returns a Results object from a list of entries
|
||||
func ResultsWithEntries(q Query, res []Entry) Results {
|
||||
i := 0
|
||||
return ResultsFromIterator(q, Iterator{
|
||||
Next: func() (Result, bool) {
|
||||
if i >= len(res) {
|
||||
return Result{}, false
|
||||
}
|
||||
next := res[i]
|
||||
i++
|
||||
return Result{Entry: next}, true
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func ResultsReplaceQuery(r Results, q Query) Results {
|
||||
switch r := r.(type) {
|
||||
case *results:
|
||||
// note: not using field names to make sure all fields are copied
|
||||
return &results{q, r.proc, r.res}
|
||||
case *resultsIter:
|
||||
// note: not using field names to make sure all fields are copied
|
||||
lr := r.legacyResults
|
||||
if lr != nil {
|
||||
lr = &results{q, lr.proc, lr.res}
|
||||
}
|
||||
return &resultsIter{q, r.next, r.close, lr}
|
||||
default:
|
||||
panic("unknown results type")
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// ResultsFromIterator provides an alternative way to construct
|
||||
// results without the use of channels.
|
||||
//
|
||||
|
||||
func ResultsFromIterator(q Query, iter Iterator) Results {
|
||||
if iter.Close == nil {
|
||||
iter.Close = noopClose
|
||||
}
|
||||
return &resultsIter{
|
||||
query: q,
|
||||
next: iter.Next,
|
||||
close: iter.Close,
|
||||
}
|
||||
}
|
||||
|
||||
func noopClose() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type Iterator struct {
|
||||
Next func() (Result, bool)
|
||||
Close func() error // note: might be called more than once
|
||||
}
|
||||
|
||||
type resultsIter struct {
|
||||
query Query
|
||||
next func() (Result, bool)
|
||||
close func() error
|
||||
legacyResults *results
|
||||
}
|
||||
|
||||
func (r *resultsIter) Next() <-chan Result {
|
||||
r.useLegacyResults()
|
||||
return r.legacyResults.Next()
|
||||
}
|
||||
|
||||
func (r *resultsIter) NextSync() (Result, bool) {
|
||||
if r.legacyResults != nil {
|
||||
return r.legacyResults.NextSync()
|
||||
} else {
|
||||
res, ok := r.next()
|
||||
if !ok {
|
||||
r.close()
|
||||
}
|
||||
return res, ok
|
||||
}
|
||||
}
|
||||
|
||||
func (r *resultsIter) Rest() ([]Entry, error) {
|
||||
var es []Entry
|
||||
for {
|
||||
e, ok := r.NextSync()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if e.Error != nil {
|
||||
return es, e.Error
|
||||
}
|
||||
es = append(es, e.Entry)
|
||||
}
|
||||
return es, nil
|
||||
}
|
||||
|
||||
func (r *resultsIter) Process() goprocess.Process {
|
||||
r.useLegacyResults()
|
||||
return r.legacyResults.Process()
|
||||
}
|
||||
|
||||
func (r *resultsIter) Close() error {
|
||||
if r.legacyResults != nil {
|
||||
return r.legacyResults.Close()
|
||||
} else {
|
||||
return r.close()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *resultsIter) Query() Query {
|
||||
return r.query
|
||||
}
|
||||
|
||||
func (r *resultsIter) useLegacyResults() {
|
||||
if r.legacyResults != nil {
|
||||
return
|
||||
}
|
||||
|
||||
b := NewResultBuilder(r.query)
|
||||
|
||||
// go consume all the entries and add them to the results.
|
||||
b.Process.Go(func(worker goprocess.Process) {
|
||||
defer r.close()
|
||||
for {
|
||||
e, ok := r.next()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case b.Output <- e:
|
||||
case <-worker.Closing(): // client told us to close early
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
go b.Process.CloseAfterChildren() //nolint
|
||||
|
||||
r.legacyResults = b.Results().(*results)
|
||||
}
|
|
@ -0,0 +1,158 @@
|
|||
package query
|
||||
|
||||
import (
|
||||
"path"
|
||||
|
||||
goprocess "github.com/jbenet/goprocess"
|
||||
)
|
||||
|
||||
// NaiveFilter applies a filter to the results.
|
||||
func NaiveFilter(qr Results, filter Filter) Results {
|
||||
return ResultsFromIterator(qr.Query(), Iterator{
|
||||
Next: func() (Result, bool) {
|
||||
for {
|
||||
e, ok := qr.NextSync()
|
||||
if !ok {
|
||||
return Result{}, false
|
||||
}
|
||||
if e.Error != nil || filter.Filter(e.Entry) {
|
||||
return e, true
|
||||
}
|
||||
}
|
||||
},
|
||||
Close: func() error {
|
||||
return qr.Close()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// NaiveLimit truncates the results to a given int limit
|
||||
func NaiveLimit(qr Results, limit int) Results {
|
||||
if limit == 0 {
|
||||
// 0 means no limit
|
||||
return qr
|
||||
}
|
||||
closed := false
|
||||
return ResultsFromIterator(qr.Query(), Iterator{
|
||||
Next: func() (Result, bool) {
|
||||
if limit == 0 {
|
||||
if !closed {
|
||||
closed = true
|
||||
err := qr.Close()
|
||||
if err != nil {
|
||||
return Result{Error: err}, true
|
||||
}
|
||||
}
|
||||
return Result{}, false
|
||||
}
|
||||
limit--
|
||||
return qr.NextSync()
|
||||
},
|
||||
Close: func() error {
|
||||
if closed {
|
||||
return nil
|
||||
}
|
||||
closed = true
|
||||
return qr.Close()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// NaiveOffset skips a given number of results
|
||||
func NaiveOffset(qr Results, offset int) Results {
|
||||
return ResultsFromIterator(qr.Query(), Iterator{
|
||||
Next: func() (Result, bool) {
|
||||
for ; offset > 0; offset-- {
|
||||
res, ok := qr.NextSync()
|
||||
if !ok || res.Error != nil {
|
||||
return res, ok
|
||||
}
|
||||
}
|
||||
return qr.NextSync()
|
||||
},
|
||||
Close: func() error {
|
||||
return qr.Close()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// NaiveOrder reorders results according to given orders.
|
||||
// WARNING: this is the only non-stream friendly operation!
|
||||
func NaiveOrder(qr Results, orders ...Order) Results {
|
||||
// Short circuit.
|
||||
if len(orders) == 0 {
|
||||
return qr
|
||||
}
|
||||
|
||||
return ResultsWithProcess(qr.Query(), func(worker goprocess.Process, out chan<- Result) {
|
||||
defer qr.Close()
|
||||
var entries []Entry
|
||||
collect:
|
||||
for {
|
||||
select {
|
||||
case <-worker.Closing():
|
||||
return
|
||||
case e, ok := <-qr.Next():
|
||||
if !ok {
|
||||
break collect
|
||||
}
|
||||
if e.Error != nil {
|
||||
out <- e
|
||||
continue
|
||||
}
|
||||
entries = append(entries, e.Entry)
|
||||
}
|
||||
}
|
||||
|
||||
Sort(orders, entries)
|
||||
|
||||
for _, e := range entries {
|
||||
select {
|
||||
case <-worker.Closing():
|
||||
return
|
||||
case out <- Result{Entry: e}:
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func NaiveQueryApply(q Query, qr Results) Results {
|
||||
if q.Prefix != "" {
|
||||
// Clean the prefix as a key and append / so a prefix of /bar
|
||||
// only finds /bar/baz, not /barbaz.
|
||||
prefix := q.Prefix
|
||||
if len(prefix) == 0 {
|
||||
prefix = "/"
|
||||
} else {
|
||||
if prefix[0] != '/' {
|
||||
prefix = "/" + prefix
|
||||
}
|
||||
prefix = path.Clean(prefix)
|
||||
}
|
||||
// If the prefix is empty, ignore it.
|
||||
if prefix != "/" {
|
||||
qr = NaiveFilter(qr, FilterKeyPrefix{prefix + "/"})
|
||||
}
|
||||
}
|
||||
for _, f := range q.Filters {
|
||||
qr = NaiveFilter(qr, f)
|
||||
}
|
||||
if len(q.Orders) > 0 {
|
||||
qr = NaiveOrder(qr, q.Orders...)
|
||||
}
|
||||
if q.Offset != 0 {
|
||||
qr = NaiveOffset(qr, q.Offset)
|
||||
}
|
||||
if q.Limit != 0 {
|
||||
qr = NaiveLimit(qr, q.Limit)
|
||||
}
|
||||
return qr
|
||||
}
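// Illustrative usage (a sketch; `raw` stands in for results obtained from a
// datastore's native query, and the prefix is hypothetical):
//
//	q := Query{Prefix: "/peers", Limit: 5}
//	qr := NaiveQueryApply(q, raw)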
|
||||
|
||||
func ResultEntriesFrom(keys []string, vals [][]byte) []Entry {
|
||||
re := make([]Entry, len(keys))
|
||||
for i, k := range keys {
|
||||
re[i] = Entry{Key: k, Size: len(vals[i]), Value: vals[i]}
|
||||
}
|
||||
return re
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Jeromy Johnson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,61 @@
|
|||
# SQL Datastore
|
||||
|
||||
[![CircleCI](https://circleci.com/gh/ipfs/go-ds-sql.svg?style=shield)](https://circleci.com/gh/ipfs/go-ds-sql)
|
||||
[![Coverage](https://codecov.io/gh/ipfs/go-ds-sql/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-ds-sql)
|
||||
[![Standard README](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg)](https://github.com/RichardLitt/standard-readme)
|
||||
[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg)](https://godoc.org/github.com/ipfs/go-ds-sql)
|
||||
[![golang version](https://img.shields.io/badge/golang-%3E%3D1.14.0-orange.svg)](https://golang.org/)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/ipfs/go-ds-sql)](https://goreportcard.com/report/github.com/ipfs/go-ds-sql)
|
||||
|
||||
An implementation of [the datastore interface](https://github.com/ipfs/go-datastore)
|
||||
that can be backed by any SQL database.
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
go get github.com/ipfs/go-ds-sql
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Ensure a database is created and a table exists with `key` and `data` columns. For example, in PostgreSQL you can create a table with the following structure (replacing `table_name` with the name of the table the datastore will use - by default this is `blocks`):
|
||||
|
||||
```sql
|
||||
CREATE TABLE IF NOT EXISTS table_name (key TEXT NOT NULL UNIQUE, data BYTEA)
|
||||
```
|
||||
|
||||
It's recommended to create an index on the `key` column that is optimised for prefix scans. For example, in PostgreSQL you can create a `text_pattern_ops` index on the table:
|
||||
|
||||
```sql
|
||||
CREATE INDEX IF NOT EXISTS table_name_key_text_pattern_ops_idx ON table_name (key text_pattern_ops)
|
||||
```
|
||||
|
||||
Import and use in your application:
|
||||
|
||||
```go
|
||||
import (
|
||||
"database/sql"
|
||||
"github.com/ipfs/go-ds-sql"
|
||||
pg "github.com/ipfs/go-ds-sql/postgres"
|
||||
)
|
||||
|
||||
mydb, _ := sql.Open("yourdb", "yourdbparameters")
|
||||
|
||||
// Implement the Queries interface for your SQL impl.
|
||||
// ...or use the provided PostgreSQL queries
|
||||
queries := pg.NewQueries("blocks")
|
||||
|
||||
ds := sqlds.NewDatastore(mydb, queries)
|
||||
```
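
A slightly fuller sketch of writing and reading a value (illustrative only; the connection string, table name and key are placeholders, and error handling is abbreviated):

```go
package main

import (
	"database/sql"
	"fmt"

	ds "github.com/ipfs/go-datastore"
	sqlds "github.com/ipfs/go-ds-sql"
	pg "github.com/ipfs/go-ds-sql/postgres"
	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/dbname?sslmode=disable")
	if err != nil {
		panic(err)
	}

	store := sqlds.NewDatastore(db, pg.NewQueries("blocks"))
	defer store.Close()

	key := ds.NewKey("/greeting")
	if err := store.Put(key, []byte("hello")); err != nil {
		panic(err)
	}

	val, err := store.Get(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val))
}
```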
|
||||
|
||||
## API
|
||||
|
||||
[GoDoc Reference](https://godoc.org/github.com/ipfs/go-ds-sql)
|
||||
|
||||
## Contribute
|
||||
|
||||
Feel free to dive in! [Open an issue](https://github.com/ipfs/go-ds-sql/issues/new) or submit PRs.
|
||||
|
||||
## License
|
||||
|
||||
[MIT](LICENSE)
|
|
@ -0,0 +1,65 @@
|
|||
package sqlds
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
)
|
||||
|
||||
type op struct {
|
||||
delete bool
|
||||
value []byte
|
||||
}
|
||||
|
||||
type batch struct {
|
||||
ds *Datastore
|
||||
ops map[ds.Key]op
|
||||
}
|
||||
|
||||
// Batch creates a set of deferred updates to the database.
|
||||
// Since SQL does not support a true batch of updates,
|
||||
// operations are buffered and then executed sequentially
|
||||
// over a single connection when Commit is called.
|
||||
func (d *Datastore) Batch() (ds.Batch, error) {
|
||||
return &batch{
|
||||
ds: d,
|
||||
ops: make(map[ds.Key]op),
|
||||
}, nil
|
||||
}
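// Illustrative usage (a sketch; assumes d is a *Datastore, the keys and values
// are hypothetical, and errors are elided):
//
//	b, _ := d.Batch()
//	_ = b.Put(ds.NewKey("/a"), []byte("1"))
//	_ = b.Delete(ds.NewKey("/b"))
//	_ = b.Commit() // executes the buffered ops sequentially over one connection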
|
||||
|
||||
func (bt *batch) Put(key ds.Key, val []byte) error {
|
||||
bt.ops[key] = op{value: val}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bt *batch) Delete(key ds.Key) error {
|
||||
bt.ops[key] = op{delete: true}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bt *batch) Commit() error {
|
||||
return bt.CommitContext(context.Background())
|
||||
}
|
||||
|
||||
func (bt *batch) CommitContext(ctx context.Context) error {
|
||||
conn, err := bt.ds.db.Conn(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
for k, op := range bt.ops {
|
||||
if op.delete {
|
||||
_, err = conn.ExecContext(ctx, bt.ds.queries.Delete(), k.String())
|
||||
} else {
|
||||
_, err = conn.ExecContext(ctx, bt.ds.queries.Put(), k.String(), op.value)
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
var _ ds.Batching = (*Datastore)(nil)
|
|
@ -0,0 +1,203 @@
|
|||
package sqlds
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// Queries generates SQL queries for datastore operations.
|
||||
type Queries interface {
|
||||
Delete() string
|
||||
Exists() string
|
||||
Get() string
|
||||
Put() string
|
||||
Query() string
|
||||
Prefix() string
|
||||
Limit() string
|
||||
Offset() string
|
||||
GetSize() string
|
||||
}
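// A Queries implementation simply returns SQL text for each operation. The
// strings below are a hypothetical sketch for a table named "blocks", not the
// output of the provided PostgreSQL implementation:
//
//	Get()    -> "SELECT data FROM blocks WHERE key = $1"
//	Put()    -> "INSERT INTO blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET data = $2"
//	Delete() -> "DELETE FROM blocks WHERE key = $1"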
|
||||
|
||||
// Datastore is a SQL backed datastore.
|
||||
type Datastore struct {
|
||||
db *sql.DB
|
||||
queries Queries
|
||||
}
|
||||
|
||||
// NewDatastore returns a new SQL datastore.
|
||||
func NewDatastore(db *sql.DB, queries Queries) *Datastore {
|
||||
return &Datastore{db: db, queries: queries}
|
||||
}
|
||||
|
||||
// Close closes the underlying SQL database.
|
||||
func (d *Datastore) Close() error {
|
||||
return d.db.Close()
|
||||
}
|
||||
|
||||
// Delete removes a row from the SQL database by the given key.
|
||||
func (d *Datastore) Delete(key ds.Key) error {
|
||||
_, err := d.db.Exec(d.queries.Delete(), key.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves a value from the SQL database by the given key.
|
||||
func (d *Datastore) Get(key ds.Key) (value []byte, err error) {
|
||||
row := d.db.QueryRow(d.queries.Get(), key.String())
|
||||
var out []byte
|
||||
|
||||
switch err := row.Scan(&out); err {
|
||||
case sql.ErrNoRows:
|
||||
return nil, ds.ErrNotFound
|
||||
case nil:
|
||||
return out, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Has determines if a value for the given key exists in the SQL database.
|
||||
func (d *Datastore) Has(key ds.Key) (exists bool, err error) {
|
||||
row := d.db.QueryRow(d.queries.Exists(), key.String())
|
||||
|
||||
switch err := row.Scan(&exists); err {
|
||||
case sql.ErrNoRows:
|
||||
return exists, nil
|
||||
case nil:
|
||||
return exists, nil
|
||||
default:
|
||||
return exists, err
|
||||
}
|
||||
}
|
||||
|
||||
// Put "upserts" a row into the SQL database.
|
||||
func (d *Datastore) Put(key ds.Key, value []byte) error {
|
||||
_, err := d.db.Exec(d.queries.Put(), key.String(), value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query returns multiple rows from the SQL database based on the passed query parameters.
|
||||
func (d *Datastore) Query(q dsq.Query) (dsq.Results, error) {
|
||||
raw, err := d.rawQuery(q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, f := range q.Filters {
|
||||
raw = dsq.NaiveFilter(raw, f)
|
||||
}
|
||||
|
||||
raw = dsq.NaiveOrder(raw, q.Orders...)
|
||||
|
||||
// if we have filters or orders, offset and limit won't have been applied in the query
|
||||
if len(q.Filters) > 0 || len(q.Orders) > 0 {
|
||||
if q.Offset != 0 {
|
||||
raw = dsq.NaiveOffset(raw, q.Offset)
|
||||
}
|
||||
if q.Limit != 0 {
|
||||
raw = dsq.NaiveLimit(raw, q.Limit)
|
||||
}
|
||||
}
|
||||
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
func (d *Datastore) rawQuery(q dsq.Query) (dsq.Results, error) {
|
||||
var rows *sql.Rows
|
||||
var err error
|
||||
|
||||
rows, err = queryWithParams(d, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
it := dsq.Iterator{
|
||||
Next: func() (dsq.Result, bool) {
|
||||
if !rows.Next() {
|
||||
return dsq.Result{}, false
|
||||
}
|
||||
|
||||
var key string
|
||||
var out []byte
|
||||
|
||||
err := rows.Scan(&key, &out)
|
||||
if err != nil {
|
||||
return dsq.Result{Error: err}, false
|
||||
}
|
||||
|
||||
entry := dsq.Entry{Key: key}
|
||||
|
||||
if !q.KeysOnly {
|
||||
entry.Value = out
|
||||
}
|
||||
if q.ReturnsSizes {
|
||||
entry.Size = len(out)
|
||||
}
|
||||
|
||||
return dsq.Result{Entry: entry}, true
|
||||
},
|
||||
Close: func() error {
|
||||
return rows.Close()
|
||||
},
|
||||
}
|
||||
|
||||
return dsq.ResultsFromIterator(q, it), nil
|
||||
}
|
||||
|
||||
// Sync is a no-op for SQL databases.
|
||||
func (d *Datastore) Sync(key ds.Key) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSize determines the size in bytes of the value for a given key.
|
||||
func (d *Datastore) GetSize(key ds.Key) (int, error) {
|
||||
row := d.db.QueryRow(d.queries.GetSize(), key.String())
|
||||
var size int
|
||||
|
||||
switch err := row.Scan(&size); err {
|
||||
case sql.ErrNoRows:
|
||||
return -1, ds.ErrNotFound
|
||||
case nil:
|
||||
return size, nil
|
||||
default:
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// queryWithParams applies the prefix, limit, and offset params to the SQL query.
|
||||
func queryWithParams(d *Datastore, q dsq.Query) (*sql.Rows, error) {
|
||||
var qNew = d.queries.Query()
|
||||
|
||||
if q.Prefix != "" {
|
||||
// normalize
|
||||
prefix := ds.NewKey(q.Prefix).String()
|
||||
if prefix != "/" {
|
||||
qNew += fmt.Sprintf(d.queries.Prefix(), prefix+"/")
|
||||
}
|
||||
}
|
||||
|
||||
// only apply limit and offset if we do not have to naive filter/order the results
|
||||
if len(q.Filters) == 0 && len(q.Orders) == 0 {
|
||||
if q.Limit != 0 {
|
||||
qNew += fmt.Sprintf(d.queries.Limit(), q.Limit)
|
||||
}
|
||||
if q.Offset != 0 {
|
||||
qNew += fmt.Sprintf(d.queries.Offset(), q.Offset)
|
||||
}
|
||||
}
|
||||
|
||||
return d.db.Query(qNew)
|
||||
|
||||
}
|
||||
|
||||
var _ ds.Datastore = (*Datastore)(nil)
|
|
@ -0,0 +1,8 @@
|
|||
module github.com/ipfs/go-ds-sql
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/ipfs/go-datastore v0.4.4
|
||||
github.com/lib/pq v1.3.0
|
||||
)
|
|
@ -0,0 +1,17 @@
|
|||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8=
|
||||
github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
|
||||
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw=
|
||||
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
|
||||
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
@ -0,0 +1,30 @@
|
|||
{
|
||||
"author": "whyrusleeping",
|
||||
"bugs": {
|
||||
"url": "https://github.com/whyrusleeping/sql-datastore"
|
||||
},
|
||||
"gx": {
|
||||
"dvcsimport": "github.com/whyrusleeping/sql-datastore"
|
||||
},
|
||||
"gxDependencies": [
|
||||
{
|
||||
"author": "magik6k",
|
||||
"hash": "QmfJhaxwzBCorUmZNRmY87z4mD6roRrHFMqddhiS4D4XWr",
|
||||
"name": "pq",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"author": "jbenet",
|
||||
"hash": "QmPGYyi1DtuWyUkG3PtvLz1xb4ScjnUvwJMCoX3cxeyxNr",
|
||||
"name": "go-datastore",
|
||||
"version": "3.5.0"
|
||||
}
|
||||
],
|
||||
"gxVersion": "0.14.0",
|
||||
"language": "go",
|
||||
"license": "",
|
||||
"name": "sql-datastore",
|
||||
"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
|
||||
"version": "1.0.2"
|
||||
}
|
||||
|
|
@ -0,0 +1,122 @@
|
|||
package sqlds
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
datastore "github.com/ipfs/go-datastore"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
)
|
||||
|
||||
// ErrNotImplemented is returned when the SQL datastore does not yet implement the function call.
|
||||
var ErrNotImplemented = fmt.Errorf("not implemented")
|
||||
|
||||
type txn struct {
|
||||
db *sql.DB
|
||||
queries Queries
|
||||
txn *sql.Tx
|
||||
}
|
||||
|
||||
// NewTransaction creates a new database transaction. Note that the readOnly parameter is ignored by this implementation.
|
||||
func (ds *Datastore) NewTransaction(_ bool) (datastore.Txn, error) {
|
||||
sqlTxn, err := ds.db.Begin()
|
||||
if err != nil {
|
||||
if sqlTxn != nil {
|
||||
// nothing we can do about this error.
|
||||
_ = sqlTxn.Rollback()
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &txn{
|
||||
db: ds.db,
|
||||
queries: ds.queries,
|
||||
txn: sqlTxn,
|
||||
}, nil
|
||||
}
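// Illustrative usage (a sketch; assumes store is a *Datastore, the key and
// value are hypothetical, and errors are elided):
//
//	txn, _ := store.NewTransaction(false)
//	_ = txn.Put(ds.NewKey("/a"), []byte("1"))
//	_ = txn.Commit() // the transaction is rolled back internally if Commit fails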
|
||||
|
||||
func (t *txn) Get(key ds.Key) ([]byte, error) {
|
||||
row := t.txn.QueryRow(t.queries.Get(), key.String())
|
||||
var out []byte
|
||||
|
||||
switch err := row.Scan(&out); err {
|
||||
case sql.ErrNoRows:
|
||||
return nil, ds.ErrNotFound
|
||||
case nil:
|
||||
return out, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
func (t *txn) Has(key ds.Key) (bool, error) {
|
||||
row := t.txn.QueryRow(t.queries.Exists(), key.String())
|
||||
var exists bool
|
||||
|
||||
switch err := row.Scan(&exists); err {
|
||||
case sql.ErrNoRows:
|
||||
return exists, nil
|
||||
case nil:
|
||||
return exists, nil
|
||||
default:
|
||||
return exists, err
|
||||
}
|
||||
}
|
||||
|
||||
func (t *txn) GetSize(key ds.Key) (int, error) {
|
||||
row := t.txn.QueryRow(t.queries.GetSize(), key.String())
|
||||
var size int
|
||||
|
||||
switch err := row.Scan(&size); err {
|
||||
case sql.ErrNoRows:
|
||||
return -1, ds.ErrNotFound
|
||||
case nil:
|
||||
return size, nil
|
||||
default:
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
func (t *txn) Query(q dsq.Query) (dsq.Results, error) {
|
||||
return nil, ErrNotImplemented
|
||||
}
|
||||
|
||||
// Put adds a value to the datastore identified by the given key.
|
||||
func (t *txn) Put(key ds.Key, val []byte) error {
|
||||
_, err := t.txn.Exec(t.queries.Put(), key.String(), val)
|
||||
if err != nil {
|
||||
_ = t.txn.Rollback()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes a value from the datastore that matches the given key.
|
||||
func (t *txn) Delete(key ds.Key) error {
|
||||
_, err := t.txn.Exec(t.queries.Delete(), key.String())
|
||||
if err != nil {
|
||||
_ = t.txn.Rollback()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commit finalizes a transaction.
|
||||
func (t *txn) Commit() error {
|
||||
err := t.txn.Commit()
|
||||
if err != nil {
|
||||
_ = t.txn.Rollback()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Discard throws away changes recorded in a transaction without committing
|
||||
// them to the underlying Datastore.
|
||||
func (t *txn) Discard() {
|
||||
_ = t.txn.Rollback()
|
||||
}
|
||||
|
||||
var _ ds.TxnDatastore = (*Datastore)(nil)
|
|
@ -0,0 +1,42 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
func GenerateTestAddrs(n int) []ma.Multiaddr {
|
||||
out := make([]ma.Multiaddr, n)
|
||||
for i := 0; i < n; i++ {
|
||||
a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
out[i] = a
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func AssertAddressesEqual(t *testing.T, exp, act []ma.Multiaddr) {
|
||||
t.Helper()
|
||||
if len(exp) != len(act) {
|
||||
t.Fatalf("lengths not the same. expected %d, got %d\n", len(exp), len(act))
|
||||
}
|
||||
|
||||
for _, a := range exp {
|
||||
found := false
|
||||
|
||||
for _, b := range act {
|
||||
if a.Equal(b) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
t.Fatalf("expected address %s not found", a)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
|
||||
ci "github.com/libp2p/go-libp2p-core/crypto"
|
||||
)
|
||||
|
||||
var globalSeed int64
|
||||
|
||||
func RandTestKeyPair(typ, bits int) (ci.PrivKey, ci.PubKey, error) {
|
||||
// workaround for low time resolution
|
||||
seed := atomic.AddInt64(&globalSeed, 1)
|
||||
return SeededTestKeyPair(typ, bits, seed)
|
||||
}
|
||||
|
||||
func SeededTestKeyPair(typ, bits int, seed int64) (ci.PrivKey, ci.PubKey, error) {
|
||||
r := rand.New(rand.NewSource(seed))
|
||||
return ci.GenerateKeyPairWithReader(typ, bits, r)
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func AssertNilError(t *testing.T, err error) {
|
||||
t.Helper()
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExpectError(t *testing.T, err error, msg string) {
|
||||
t.Helper()
|
||||
if err == nil {
|
||||
t.Error(msg)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
func RandPeerID() (peer.ID, error) {
|
||||
buf := make([]byte, 16)
|
||||
rand.Read(buf)
|
||||
h, _ := mh.Sum(buf, mh.SHA2_256, -1)
|
||||
return peer.ID(h), nil
|
||||
}
|
||||
|
||||
func RandPeerIDFatal(t testing.TB) peer.ID {
|
||||
p, err := RandPeerID()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return p
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
PB = $(wildcard *.proto)
|
||||
GO = $(PB:.proto=.pb.go)
|
||||
|
||||
all: $(GO)
|
||||
|
||||
%.pb.go: %.proto
|
||||
protoc --proto_path=$(GOPATH)/src:. --gogofaster_out=. $<
|
||||
|
||||
clean:
|
||||
rm -f *.pb.go
|
||||
|
||||
.PHONY: clean
|
|
@ -0,0 +1,110 @@
|
|||
package pstore_pb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
peer "github.com/libp2p/go-libp2p-core/peer"
|
||||
pt "github.com/libp2p/go-libp2p-core/test"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// customGogoType aggregates the interfaces that custom Gogo types need to implement.
|
||||
// It is only used for type assertions.
|
||||
type customGogoType interface {
|
||||
proto.Marshaler
|
||||
proto.Unmarshaler
|
||||
json.Marshaler
|
||||
json.Unmarshaler
|
||||
proto.Sizer
|
||||
}
|
||||
|
||||
// ProtoPeerID is a custom type used by gogo to serde raw peer IDs into the peer.ID type, and back.
|
||||
type ProtoPeerID struct {
|
||||
peer.ID
|
||||
}
|
||||
|
||||
var _ customGogoType = (*ProtoPeerID)(nil)
|
||||
|
||||
func (id ProtoPeerID) Marshal() ([]byte, error) {
|
||||
return []byte(id.ID), nil
|
||||
}
|
||||
|
||||
func (id ProtoPeerID) MarshalTo(data []byte) (n int, err error) {
|
||||
return copy(data, []byte(id.ID)), nil
|
||||
}
|
||||
|
||||
func (id ProtoPeerID) MarshalJSON() ([]byte, error) {
|
||||
m, _ := id.Marshal()
|
||||
return json.Marshal(m)
|
||||
}
|
||||
|
||||
func (id *ProtoPeerID) Unmarshal(data []byte) (err error) {
|
||||
id.ID = peer.ID(string(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (id *ProtoPeerID) UnmarshalJSON(data []byte) error {
|
||||
var v []byte
|
||||
err := json.Unmarshal(data, &v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return id.Unmarshal(v)
|
||||
}
|
||||
|
||||
func (id ProtoPeerID) Size() int {
|
||||
return len([]byte(id.ID))
|
||||
}
|
||||
|
||||
// ProtoAddr is a custom type used by gogo to serde raw multiaddresses into the ma.Multiaddr type, and back.
|
||||
type ProtoAddr struct {
|
||||
ma.Multiaddr
|
||||
}
|
||||
|
||||
var _ customGogoType = (*ProtoAddr)(nil)
|
||||
|
||||
func (a ProtoAddr) Marshal() ([]byte, error) {
|
||||
return a.Bytes(), nil
|
||||
}
|
||||
|
||||
func (a ProtoAddr) MarshalTo(data []byte) (n int, err error) {
|
||||
return copy(data, a.Bytes()), nil
|
||||
}
|
||||
|
||||
func (a ProtoAddr) MarshalJSON() ([]byte, error) {
|
||||
m, _ := a.Marshal()
|
||||
return json.Marshal(m)
|
||||
}
|
||||
|
||||
func (a *ProtoAddr) Unmarshal(data []byte) (err error) {
|
||||
a.Multiaddr, err = ma.NewMultiaddrBytes(data)
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *ProtoAddr) UnmarshalJSON(data []byte) error {
|
||||
v := new([]byte)
|
||||
err := json.Unmarshal(data, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return a.Unmarshal(*v)
|
||||
}
|
||||
|
||||
func (a ProtoAddr) Size() int {
|
||||
return len(a.Bytes())
|
||||
}
|
||||
|
||||
// NewPopulatedProtoAddr generates a populated instance of the custom gogo type ProtoAddr.
|
||||
// It is required by gogo-generated tests.
|
||||
func NewPopulatedProtoAddr(r randyPstore) *ProtoAddr {
|
||||
a, _ := ma.NewMultiaddr("/ip4/123.123.123.123/tcp/7001")
|
||||
return &ProtoAddr{Multiaddr: a}
|
||||
}
|
||||
|
||||
// NewPopulatedProtoPeerID generates a populated instance of the custom gogo type ProtoPeerID.
|
||||
// It is required by gogo-generated tests.
|
||||
func NewPopulatedProtoPeerID(r randyPstore) *ProtoPeerID {
|
||||
id, _ := pt.RandPeerID()
|
||||
return &ProtoPeerID{ID: id}
|
||||
}
|
File diff suppressed because it is too large
|
@ -0,0 +1,40 @@
|
|||
syntax = "proto3";
|
||||
package pstore.pb;
|
||||
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
|
||||
option (gogoproto.benchgen_all) = true;
|
||||
option (gogoproto.populate_all) = true;
|
||||
|
||||
// AddrBookRecord represents a record for a peer in the address book.
|
||||
message AddrBookRecord {
|
||||
// The peer ID.
|
||||
bytes id = 1 [(gogoproto.customtype) = "ProtoPeerID"];
|
||||
|
||||
// The multiaddresses. This is a sorted list where element 0 expires the soonest.
|
||||
repeated AddrEntry addrs = 2;
|
||||
|
||||
// The most recently received signed PeerRecord.
|
||||
CertifiedRecord certified_record = 3;
|
||||
|
||||
// AddrEntry represents a single multiaddress.
|
||||
message AddrEntry {
|
||||
bytes addr = 1 [(gogoproto.customtype) = "ProtoAddr"];
|
||||
|
||||
// The point in time when this address expires.
|
||||
int64 expiry = 2;
|
||||
|
||||
// The original TTL of this address.
|
||||
int64 ttl = 3;
|
||||
}
|
||||
|
||||
// CertifiedRecord contains a serialized signed PeerRecord used to
|
||||
// populate the signedAddrs list.
|
||||
message CertifiedRecord {
|
||||
// The Seq counter from the signed PeerRecord envelope
|
||||
uint64 seq = 1;
|
||||
|
||||
// The serialized bytes of the SignedEnvelope containing the PeerRecord.
|
||||
bytes raw = 2;
|
||||
}
|
||||
}
|
578
vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book.go
generated
vendored
Normal file
|
@ -0,0 +1,578 @@
|
|||
package pstoreds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/record"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/query"
|
||||
logging "github.com/ipfs/go-log"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p-core/peerstore"
|
||||
pb "github.com/libp2p/go-libp2p-peerstore/pb"
|
||||
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
b32 "github.com/multiformats/go-base32"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
type ttlWriteMode int
|
||||
|
||||
const (
|
||||
ttlOverride ttlWriteMode = iota
|
||||
ttlExtend
|
||||
)
|
||||
|
||||
var (
|
||||
log = logging.Logger("peerstore/ds")
|
||||
|
||||
// Peer addresses are stored under the db key pattern:
|
||||
// /peers/addrs/<b32 peer id no padding>
|
||||
addrBookBase = ds.NewKey("/peers/addrs")
|
||||
)
|
||||
|
||||
// addrsRecord decorates the AddrBookRecord with locks and metadata.
|
||||
type addrsRecord struct {
|
||||
sync.RWMutex
|
||||
*pb.AddrBookRecord
|
||||
dirty bool
|
||||
}
|
||||
|
||||
// flush writes the record to the datastore by calling ds.Put, unless the record is
|
||||
// marked for deletion, in which case we call ds.Delete. To be called within a lock.
|
||||
func (r *addrsRecord) flush(write ds.Write) (err error) {
|
||||
key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(r.Id.ID)))
|
||||
|
||||
if len(r.Addrs) == 0 {
|
||||
if err = write.Delete(key); err == nil {
|
||||
r.dirty = false
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := r.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = write.Put(key, data); err != nil {
|
||||
return err
|
||||
}
|
||||
// write succeeded; record is no longer dirty.
|
||||
r.dirty = false
|
||||
return nil
|
||||
}
|
||||
|
||||
// clean is called on records to perform housekeeping. The return value indicates if the record was changed
|
||||
// as a result of this call.
|
||||
//
|
||||
// clean does the following:
|
||||
// * sorts addresses by expiration (soonest expiring first).
|
||||
// * removes expired addresses.
|
||||
//
|
||||
// It short-circuits optimistically when there's nothing to do.
|
||||
//
|
||||
// clean is called from several points:
|
||||
// * when accessing an entry.
|
||||
// * when performing periodic GC.
|
||||
// * after an entry has been modified (e.g. addresses have been added or removed, TTLs updated, etc.)
|
||||
//
|
||||
// If the return value is true, the caller should perform a flush immediately to sync the record with the store.
|
||||
func (r *addrsRecord) clean() (chgd bool) {
|
||||
now := time.Now().Unix()
|
||||
addrsLen := len(r.Addrs)
|
||||
|
||||
if !r.dirty && !r.hasExpiredAddrs(now) {
|
||||
// record is not dirty, and we have no expired entries to purge.
|
||||
return false
|
||||
}
|
||||
|
||||
if addrsLen == 0 {
|
||||
// this is a ghost record; let's signal it has to be written.
|
||||
// flush() will take care of doing the deletion.
|
||||
return true
|
||||
}
|
||||
|
||||
if r.dirty && addrsLen > 1 {
|
||||
sort.Slice(r.Addrs, func(i, j int) bool {
|
||||
return r.Addrs[i].Expiry < r.Addrs[j].Expiry
|
||||
})
|
||||
}
|
||||
|
||||
r.Addrs = removeExpired(r.Addrs, now)
|
||||
|
||||
return r.dirty || len(r.Addrs) != addrsLen
|
||||
}
|
||||
|
||||
func (r *addrsRecord) hasExpiredAddrs(now int64) bool {
|
||||
if len(r.Addrs) > 0 && r.Addrs[0].Expiry <= now {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func removeExpired(entries []*pb.AddrBookRecord_AddrEntry, now int64) []*pb.AddrBookRecord_AddrEntry {
|
||||
// since addresses are sorted by expiration, we find the first
|
||||
// survivor and split the slice on its index.
|
||||
pivot := -1
|
||||
for i, addr := range entries {
|
||||
if addr.Expiry > now {
|
||||
break
|
||||
}
|
||||
pivot = i
|
||||
}
|
||||
|
||||
return entries[pivot+1:]
|
||||
}
|
||||
|
||||
// dsAddrBook is an address book backed by a Datastore with a GC procedure to purge expired entries. It uses an
|
||||
// in-memory address stream manager. See NewAddrBook for more information.
|
||||
type dsAddrBook struct {
|
||||
ctx context.Context
|
||||
opts Options
|
||||
|
||||
cache cache
|
||||
ds ds.Batching
|
||||
gc *dsAddrBookGc
|
||||
subsManager *pstoremem.AddrSubManager
|
||||
|
||||
// controls children goroutine lifetime.
|
||||
childrenDone sync.WaitGroup
|
||||
cancelFn func()
|
||||
}
|
||||
|
||||
var _ pstore.AddrBook = (*dsAddrBook)(nil)
|
||||
var _ pstore.CertifiedAddrBook = (*dsAddrBook)(nil)
|
||||
|
||||
// NewAddrBook initializes a new datastore-backed address book. It serves as a drop-in replacement for pstoremem
|
||||
// (memory-backed peerstore), and works with any datastore implementing the ds.Batching interface.
|
||||
//
|
||||
// Addresses and peer records are serialized into protobuf, storing one datastore entry per peer, along with metadata
|
||||
// to control address expiration. To alleviate disk access and serde overhead, we internally use a read/write-through
|
||||
// ARC cache, the size of which is adjustable via Options.CacheSize.
|
||||
//
|
||||
// The user has a choice of two GC algorithms:
|
||||
//
|
||||
// - lookahead GC: minimises the amount of full store traversals by maintaining a time-indexed list of entries that
|
||||
// need to be visited within the period specified in Options.GCLookaheadInterval. This is useful in scenarios with
|
||||
// considerable TTL variance, coupled with datastores whose native iterators return entries in lexicographical key
|
||||
// order. Enable this mode by passing a value Options.GCLookaheadInterval > 0. Lookahead windows are jumpy, not
|
||||
// sliding. Purges operate exclusively over the lookahead window with periodicity Options.GCPurgeInterval.
|
||||
//
|
||||
// - full-purge GC (default): performs a full visit of the store with periodicity Options.GCPurgeInterval. Useful when
|
||||
// the range of possible TTL values is small and the values themselves are also extreme, e.g. 10 minutes or
|
||||
// permanent, popular values used in other libp2p modules. In this cited case, optimizing with lookahead windows
|
||||
// makes little sense.
|
||||
func NewAddrBook(ctx context.Context, store ds.Batching, opts Options) (ab *dsAddrBook, err error) {
|
||||
ctx, cancelFn := context.WithCancel(ctx)
|
||||
ab = &dsAddrBook{
|
||||
ctx: ctx,
|
||||
ds: store,
|
||||
opts: opts,
|
||||
cancelFn: cancelFn,
|
||||
subsManager: pstoremem.NewAddrSubManager(),
|
||||
}
|
||||
|
||||
if opts.CacheSize > 0 {
|
||||
if ab.cache, err = lru.NewARC(int(opts.CacheSize)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
ab.cache = new(noopCache)
|
||||
}
|
||||
|
||||
if ab.gc, err = newAddressBookGc(ctx, ab); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ab, nil
|
||||
}
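// Illustrative usage (a sketch; `store` is any ds.Batching implementation and
// `opts` is an Options value configured elsewhere, e.g. with a non-zero
// CacheSize and a GCPurgeInterval suited to the deployment):
//
//	ab, err := NewAddrBook(ctx, store, opts)
//	if err != nil {
//		return err
//	}
//	defer ab.Close()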
|
||||
|
||||
func (ab *dsAddrBook) Close() error {
|
||||
ab.cancelFn()
|
||||
ab.childrenDone.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadRecord is a read-through fetch. It fetches a record from cache, falling back to the
|
||||
// datastore upon a miss, and returning a newly initialized record if the peer doesn't exist.
|
||||
//
|
||||
// loadRecord calls clean() on an existing record before returning it. If the record changes
|
||||
// as a result and the update argument is true, the resulting state is saved in the datastore.
|
||||
//
|
||||
// If the cache argument is true, the record is inserted in the cache when loaded from the datastore.
|
||||
func (ab *dsAddrBook) loadRecord(id peer.ID, cache bool, update bool) (pr *addrsRecord, err error) {
|
||||
if err := id.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if e, ok := ab.cache.Get(id); ok {
|
||||
pr = e.(*addrsRecord)
|
||||
pr.Lock()
|
||||
defer pr.Unlock()
|
||||
|
||||
if pr.clean() && update {
|
||||
err = pr.flush(ab.ds)
|
||||
}
|
||||
return pr, err
|
||||
}
|
||||
|
||||
pr = &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}}
|
||||
key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(id)))
|
||||
data, err := ab.ds.Get(key)
|
||||
|
||||
switch err {
|
||||
case ds.ErrNotFound:
|
||||
err = nil
|
||||
pr.Id = &pb.ProtoPeerID{ID: id}
|
||||
case nil:
|
||||
if err = pr.Unmarshal(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// this record is new and local for now (not in cache), so we don't need to lock.
|
||||
if pr.clean() && update {
|
||||
err = pr.flush(ab.ds)
|
||||
}
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cache {
|
||||
ab.cache.Add(id, pr)
|
||||
}
|
||||
return pr, err
|
||||
}
|
||||
|
||||
// AddAddr will add a new address if it's not already in the AddrBook.
|
||||
func (ab *dsAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
|
||||
ab.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
|
||||
}
|
||||
|
||||
// AddAddrs will add many new addresses if they're not already in the AddrBook.
|
||||
func (ab *dsAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
|
||||
if ttl <= 0 {
|
||||
return
|
||||
}
|
||||
addrs = cleanAddrs(addrs)
|
||||
ab.setAddrs(p, addrs, ttl, ttlExtend, false)
|
||||
}
|
||||
|
||||
// ConsumePeerRecord adds addresses from a signed peer.PeerRecord (contained in
|
||||
// a record.Envelope), which will expire after the given TTL.
|
||||
// See https://godoc.org/github.com/libp2p/go-libp2p-core/peerstore#CertifiedAddrBook for more details.
|
||||
func (ab *dsAddrBook) ConsumePeerRecord(recordEnvelope *record.Envelope, ttl time.Duration) (bool, error) {
|
||||
r, err := recordEnvelope.Record()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
rec, ok := r.(*peer.PeerRecord)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("envelope did not contain PeerRecord")
|
||||
}
|
||||
if !rec.PeerID.MatchesPublicKey(recordEnvelope.PublicKey) {
|
||||
return false, fmt.Errorf("signing key does not match PeerID in PeerRecord")
|
||||
}
|
||||
|
||||
// ensure that the seq number from the envelope is >= any previously received seq number;
|
||||
// we still update when equal, in order to extend the TTLs
|
||||
if ab.latestPeerRecordSeq(rec.PeerID) > rec.Seq {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
addrs := cleanAddrs(rec.Addrs)
|
||||
err = ab.setAddrs(rec.PeerID, addrs, ttl, ttlExtend, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
err = ab.storeSignedPeerRecord(rec.PeerID, recordEnvelope, rec)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
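For context, a minimal caller-side sketch of feeding a signed record into a CertifiedAddrBook such as this one; the helper name, inputs and TTL are illustrative, and the imports (peer/record/pstore from go-libp2p-core, ic for crypto, ma for multiaddrs) are assumed rather than part of this change:

// hedged sketch, not part of this change
func consumeOwnRecord(ab pstore.CertifiedAddrBook, pid peer.ID, priv ic.PrivKey, addrs []ma.Multiaddr) (bool, error) {
	rec := peer.NewPeerRecord() // assigns a fresh Seq
	rec.PeerID = pid
	rec.Addrs = addrs
	env, err := record.Seal(rec, priv) // sign the record into an Envelope
	if err != nil {
		return false, err
	}
	// stored certified addrs expire after the given TTL, just like AddAddrs
	return ab.ConsumePeerRecord(env, pstore.RecentlyConnectedAddrTTL)
}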
|
||||
func (ab *dsAddrBook) latestPeerRecordSeq(p peer.ID) uint64 {
|
||||
pr, err := ab.loadRecord(p, true, false)
|
||||
if err != nil || len(pr.Addrs) == 0 || pr.CertifiedRecord == nil || len(pr.CertifiedRecord.Raw) == 0 {
|
||||
return 0
|
||||
}
|
||||
return pr.CertifiedRecord.Seq
|
||||
}
|
||||
|
||||
func (ab *dsAddrBook) storeSignedPeerRecord(p peer.ID, envelope *record.Envelope, rec *peer.PeerRecord) error {
|
||||
envelopeBytes, err := envelope.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// reload record and add routing state
|
||||
// this has to be done after we add the addresses, since if
|
||||
// we try to flush a datastore record with no addresses,
|
||||
// it will just get deleted
|
||||
pr, err := ab.loadRecord(p, true, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pr.Lock()
|
||||
defer pr.Unlock()
|
||||
pr.CertifiedRecord = &pb.AddrBookRecord_CertifiedRecord{
|
||||
Seq: rec.Seq,
|
||||
Raw: envelopeBytes,
|
||||
}
|
||||
pr.dirty = true
|
||||
err = pr.flush(ab.ds)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetPeerRecord returns a record.Envelope containing a peer.PeerRecord for the
|
||||
// given peer id, if one exists.
|
||||
// Returns nil if no signed PeerRecord exists for the peer.
|
||||
func (ab *dsAddrBook) GetPeerRecord(p peer.ID) *record.Envelope {
|
||||
pr, err := ab.loadRecord(p, true, false)
|
||||
if err != nil {
|
||||
log.Errorf("unable to load record for peer %s: %v", p.Pretty(), err)
|
||||
return nil
|
||||
}
|
||||
pr.RLock()
|
||||
defer pr.RUnlock()
|
||||
if pr.CertifiedRecord == nil || len(pr.CertifiedRecord.Raw) == 0 || len(pr.Addrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
state, _, err := record.ConsumeEnvelope(pr.CertifiedRecord.Raw, peer.PeerRecordEnvelopeDomain)
|
||||
if err != nil {
|
||||
log.Errorf("error unmarshaling stored signed peer record for peer %s: %v", p.Pretty(), err)
|
||||
return nil
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
// SetAddr will add or update the TTL of an address in the AddrBook.
|
||||
func (ab *dsAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
|
||||
ab.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
|
||||
}
|
||||
|
||||
// SetAddrs will add or update the TTLs of addresses in the AddrBook.
|
||||
func (ab *dsAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
|
||||
addrs = cleanAddrs(addrs)
|
||||
if ttl <= 0 {
|
||||
ab.deleteAddrs(p, addrs)
|
||||
return
|
||||
}
|
||||
ab.setAddrs(p, addrs, ttl, ttlOverride, false)
|
||||
}
|
||||
|
||||
// UpdateAddrs will update any addresses for a given peer and TTL combination to
|
||||
// have a new TTL.
|
||||
func (ab *dsAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
|
||||
pr, err := ab.loadRecord(p, true, false)
|
||||
if err != nil {
|
||||
log.Errorf("failed to update ttls for peer %s: %s\n", p.Pretty(), err)
|
||||
return
|
||||
}
|
||||
|
||||
pr.Lock()
|
||||
defer pr.Unlock()
|
||||
|
||||
newExp := time.Now().Add(newTTL).Unix()
|
||||
for _, entry := range pr.Addrs {
|
||||
if entry.Ttl != int64(oldTTL) {
|
||||
continue
|
||||
}
|
||||
entry.Ttl, entry.Expiry = int64(newTTL), newExp
|
||||
pr.dirty = true
|
||||
}
|
||||
|
||||
if pr.clean() {
|
||||
pr.flush(ab.ds)
|
||||
}
|
||||
}
|
||||
|
||||
// Addrs returns all of the non-expired addresses for a given peer.
|
||||
func (ab *dsAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
|
||||
pr, err := ab.loadRecord(p, true, true)
|
||||
if err != nil {
|
||||
log.Warn("failed to load peerstore entry for peer %v while querying addrs, err: %v", p, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
pr.RLock()
|
||||
defer pr.RUnlock()
|
||||
|
||||
addrs := make([]ma.Multiaddr, len(pr.Addrs))
|
||||
for i, a := range pr.Addrs {
|
||||
addrs[i] = a.Addr
|
||||
}
|
||||
return addrs
|
||||
}
|
||||
|
||||
// Peers returns all of the peer IDs for which the AddrBook has addresses.
|
||||
func (ab *dsAddrBook) PeersWithAddrs() peer.IDSlice {
|
||||
ids, err := uniquePeerIds(ab.ds, addrBookBase, func(result query.Result) string {
|
||||
return ds.RawKey(result.Key).Name()
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("error while retrieving peers with addresses: %v", err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// AddrStream returns a channel on which all new addresses discovered for a
|
||||
// given peer ID will be published.
|
||||
func (ab *dsAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
|
||||
initial := ab.Addrs(p)
|
||||
return ab.subsManager.AddrStream(ctx, p, initial)
|
||||
}
|
||||
|
||||
// ClearAddrs will delete all known addresses for a peer ID.
|
||||
func (ab *dsAddrBook) ClearAddrs(p peer.ID) {
|
||||
if err := p.Validate(); err != nil {
|
||||
// nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
ab.cache.Remove(p)
|
||||
|
||||
key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(p)))
|
||||
if err := ab.ds.Delete(key); err != nil {
|
||||
log.Errorf("failed to clear addresses for peer %s: %v", p.Pretty(), err)
|
||||
}
|
||||
}
|
||||
|
||||
func (ab *dsAddrBook) setAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, mode ttlWriteMode, signed bool) (err error) {
|
||||
pr, err := ab.loadRecord(p, true, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load peerstore entry for peer %v while setting addrs, err: %v", p, err)
|
||||
}
|
||||
|
||||
pr.Lock()
|
||||
defer pr.Unlock()
|
||||
|
||||
// // if we have a signed PeerRecord, ignore attempts to add unsigned addrs
|
||||
// if !signed && pr.CertifiedRecord != nil {
|
||||
// return nil
|
||||
// }
|
||||
|
||||
newExp := time.Now().Add(ttl).Unix()
|
||||
// TODO this is very inefficient O(m*n); we could build a map to use as an
|
||||
// index, and test against it. That would turn it into O(m+n). This code
|
||||
// will be refactored entirely anyway, and it's not being used by users
|
||||
// (that we know of); so OK to keep it for now.
|
||||
updateExisting := func(entryList []*pb.AddrBookRecord_AddrEntry, incoming ma.Multiaddr) *pb.AddrBookRecord_AddrEntry {
|
||||
for _, have := range entryList {
|
||||
if incoming.Equal(have.Addr) {
|
||||
switch mode {
|
||||
case ttlOverride:
|
||||
have.Ttl = int64(ttl)
|
||||
have.Expiry = newExp
|
||||
case ttlExtend:
|
||||
if int64(ttl) > have.Ttl {
|
||||
have.Ttl = int64(ttl)
|
||||
}
|
||||
if newExp > have.Expiry {
|
||||
have.Expiry = newExp
|
||||
}
|
||||
default:
|
||||
panic("BUG: unimplemented ttl mode")
|
||||
}
|
||||
return have
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var entries []*pb.AddrBookRecord_AddrEntry
|
||||
for _, incoming := range addrs {
|
||||
existingEntry := updateExisting(pr.Addrs, incoming)
|
||||
|
||||
if existingEntry == nil {
|
||||
// if signed {
|
||||
// entries = append(entries, existingEntry)
|
||||
// }
|
||||
// } else {
|
||||
// new addr, add & broadcast
|
||||
entry := &pb.AddrBookRecord_AddrEntry{
|
||||
Addr: &pb.ProtoAddr{Multiaddr: incoming},
|
||||
Ttl: int64(ttl),
|
||||
Expiry: newExp,
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
|
||||
// note: there's a minor chance that writing the record will fail, in which case we would've broadcast
|
||||
// the addresses without persisting them. This is very unlikely and not much of an issue.
|
||||
ab.subsManager.BroadcastAddr(p, incoming)
|
||||
}
|
||||
}
|
||||
|
||||
// if signed {
|
||||
// // when adding signed addrs, we want to keep _only_ the incoming addrs
|
||||
// pr.Addrs = entries
|
||||
// } else {
|
||||
pr.Addrs = append(pr.Addrs, entries...)
|
||||
// }
|
||||
|
||||
pr.dirty = true
|
||||
pr.clean()
|
||||
return pr.flush(ab.ds)
|
||||
}
|
||||
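Regarding the TODO above: a rough sketch (not part of this change) of the map-index idea that would make the matching inside setAddrs O(m+n); it assumes the stored entry exposes the raw multiaddr bytes via Addr.Bytes():

// build an index over the existing entries once: O(m)
index := make(map[string]*pb.AddrBookRecord_AddrEntry, len(pr.Addrs))
for _, have := range pr.Addrs {
	index[string(have.Addr.Bytes())] = have
}
// each incoming address is then a single lookup instead of a scan: O(n)
for _, incoming := range addrs {
	if have, ok := index[string(incoming.Bytes())]; ok {
		// apply the ttlOverride/ttlExtend logic from updateExisting to `have`
		_ = have
		continue
	}
	// otherwise append a new entry and broadcast, as in the loop above
}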
|
||||
// deletes addresses in place, avoiding copies until we encounter the first deletion.
|
||||
// does not preserve order, but entries are re-sorted before flushing to disk anyway.
|
||||
func deleteInPlace(s []*pb.AddrBookRecord_AddrEntry, addrs []ma.Multiaddr) []*pb.AddrBookRecord_AddrEntry {
|
||||
if s == nil || len(addrs) == 0 {
|
||||
return s
|
||||
}
|
||||
survived := len(s)
|
||||
Outer:
|
||||
for i, addr := range s {
|
||||
for _, del := range addrs {
|
||||
if !addr.Addr.Equal(del) {
|
||||
continue
|
||||
}
|
||||
survived--
|
||||
// if there are no survivors, bail out
|
||||
if survived == 0 {
|
||||
break Outer
|
||||
}
|
||||
s[i] = s[survived]
|
||||
// we've already dealt with s[i], move to the next
|
||||
continue Outer
|
||||
}
|
||||
}
|
||||
return s[:survived]
|
||||
}
|
||||
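The same swap-with-last-survivor trick shown standalone on a plain int slice (purely illustrative), which may make the bookkeeping above easier to follow:

func deleteInPlaceInts(s []int, del map[int]bool) []int {
	survived := len(s)
	for i := 0; i < survived; {
		if del[s[i]] {
			survived--
			s[i] = s[survived] // move the last survivor into the hole
			continue           // re-check the element we just moved
		}
		i++
	}
	return s[:survived]
}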
|
||||
func (ab *dsAddrBook) deleteAddrs(p peer.ID, addrs []ma.Multiaddr) (err error) {
|
||||
pr, err := ab.loadRecord(p, false, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load peerstore entry for peer %v while deleting addrs, err: %v", p, err)
|
||||
}
|
||||
|
||||
if pr.Addrs == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
pr.Lock()
|
||||
defer pr.Unlock()
|
||||
|
||||
pr.Addrs = deleteInPlace(pr.Addrs, addrs)
|
||||
|
||||
pr.dirty = true
|
||||
pr.clean()
|
||||
return pr.flush(ab.ds)
|
||||
}
|
||||
|
||||
func cleanAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
clean := make([]ma.Multiaddr, 0, len(addrs))
|
||||
for _, addr := range addrs {
|
||||
if addr == nil {
|
||||
continue
|
||||
}
|
||||
clean = append(clean, addr)
|
||||
}
|
||||
return clean
|
||||
}
|
388
vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/addr_book_gc.go
generated
vendored
Normal file
|
@ -0,0 +1,388 @@
|
|||
package pstoreds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
query "github.com/ipfs/go-datastore/query"
|
||||
|
||||
peer "github.com/libp2p/go-libp2p-core/peer"
|
||||
pb "github.com/libp2p/go-libp2p-peerstore/pb"
|
||||
|
||||
b32 "github.com/multiformats/go-base32"
|
||||
)
|
||||
|
||||
var (
|
||||
// GC lookahead entries are stored in key pattern:
|
||||
// /peers/gc/addrs/<unix timestamp of next visit>/<peer ID b32> => nil
|
||||
// in databases with lexicographical key order, this time-indexing allows us to visit
|
||||
// only the timeslice we are interested in.
|
||||
gcLookaheadBase = ds.NewKey("/peers/gc/addrs")
|
||||
|
||||
// queries
|
||||
purgeLookaheadQuery = query.Query{
|
||||
Prefix: gcLookaheadBase.String(),
|
||||
Orders: []query.Order{query.OrderByKey{}},
|
||||
KeysOnly: true,
|
||||
}
|
||||
|
||||
purgeStoreQuery = query.Query{
|
||||
Prefix: addrBookBase.String(),
|
||||
Orders: []query.Order{query.OrderByKey{}},
|
||||
KeysOnly: false,
|
||||
}
|
||||
|
||||
populateLookaheadQuery = query.Query{
|
||||
Prefix: addrBookBase.String(),
|
||||
Orders: []query.Order{query.OrderByKey{}},
|
||||
KeysOnly: true,
|
||||
}
|
||||
)
|
||||
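A small illustration of the key pattern described above, mirroring how purgeLookahead and populateLookahead below build and parse these keys (values are illustrative; p is a peer.ID):

expiry := int64(1700000000) // unix timestamp of the next visit
idb32 := b32.RawStdEncoding.EncodeToString([]byte(p))
gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", expiry, idb32))
// => /peers/gc/addrs/1700000000/<peer ID b32>

// and parsing it back:
ts, _ := strconv.ParseInt(gcKey.Parent().Name(), 10, 64) // 1700000000
raw, _ := b32.RawStdEncoding.DecodeString(gcKey.Name())  // peer ID bytes
_, _ = ts, raw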
|
||||
// dsAddrBookGc is responsible for garbage collection in a datastore-backed address book.
|
||||
type dsAddrBookGc struct {
|
||||
ctx context.Context
|
||||
ab *dsAddrBook
|
||||
running chan struct{}
|
||||
lookaheadEnabled bool
|
||||
purgeFunc func()
|
||||
currWindowEnd int64
|
||||
}
|
||||
|
||||
func newAddressBookGc(ctx context.Context, ab *dsAddrBook) (*dsAddrBookGc, error) {
|
||||
if ab.opts.GCPurgeInterval < 0 {
|
||||
return nil, fmt.Errorf("negative GC purge interval provided: %s", ab.opts.GCPurgeInterval)
|
||||
}
|
||||
if ab.opts.GCLookaheadInterval < 0 {
|
||||
return nil, fmt.Errorf("negative GC lookahead interval provided: %s", ab.opts.GCLookaheadInterval)
|
||||
}
|
||||
if ab.opts.GCInitialDelay < 0 {
|
||||
return nil, fmt.Errorf("negative GC initial delay provided: %s", ab.opts.GCInitialDelay)
|
||||
}
|
||||
if ab.opts.GCLookaheadInterval > 0 && ab.opts.GCLookaheadInterval < ab.opts.GCPurgeInterval {
|
||||
return nil, fmt.Errorf("lookahead interval must be larger than purge interval, respectively: %s, %s",
|
||||
ab.opts.GCLookaheadInterval, ab.opts.GCPurgeInterval)
|
||||
}
|
||||
|
||||
lookaheadEnabled := ab.opts.GCLookaheadInterval > 0
|
||||
gc := &dsAddrBookGc{
|
||||
ctx: ctx,
|
||||
ab: ab,
|
||||
running: make(chan struct{}, 1),
|
||||
lookaheadEnabled: lookaheadEnabled,
|
||||
}
|
||||
|
||||
if lookaheadEnabled {
|
||||
gc.purgeFunc = gc.purgeLookahead
|
||||
} else {
|
||||
gc.purgeFunc = gc.purgeStore
|
||||
}
|
||||
|
||||
// do not start GC timers if purge is disabled; this GC can only be triggered manually.
|
||||
if ab.opts.GCPurgeInterval > 0 {
|
||||
gc.ab.childrenDone.Add(1)
|
||||
go gc.background()
|
||||
}
|
||||
|
||||
return gc, nil
|
||||
}
|
||||
|
||||
// background runs the GC loop, pruning expired addresses from the datastore at regular intervals. It should be spawned as a goroutine.
|
||||
func (gc *dsAddrBookGc) background() {
|
||||
defer gc.ab.childrenDone.Done()
|
||||
|
||||
select {
|
||||
case <-time.After(gc.ab.opts.GCInitialDelay):
|
||||
case <-gc.ab.ctx.Done():
|
||||
// yield if we have been cancelled/closed before the delay elapses.
|
||||
return
|
||||
}
|
||||
|
||||
purgeTimer := time.NewTicker(gc.ab.opts.GCPurgeInterval)
|
||||
defer purgeTimer.Stop()
|
||||
|
||||
var lookaheadCh <-chan time.Time
|
||||
if gc.lookaheadEnabled {
|
||||
lookaheadTimer := time.NewTicker(gc.ab.opts.GCLookaheadInterval)
|
||||
lookaheadCh = lookaheadTimer.C
|
||||
gc.populateLookahead() // do a lookahead now
|
||||
defer lookaheadTimer.Stop()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-purgeTimer.C:
|
||||
gc.purgeFunc()
|
||||
|
||||
case <-lookaheadCh:
|
||||
// will never trigger if lookahead is disabled (lookaheadCh stays nil).
|
||||
gc.populateLookahead()
|
||||
|
||||
case <-gc.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// purgeLookahead runs a single GC purge cycle over the lookahead window, deleting the addresses that have expired.
|
||||
// When lookahead is disabled, purgeStore (below) is used instead and visits all entries in the datastore.
|
||||
func (gc *dsAddrBookGc) purgeLookahead() {
|
||||
select {
|
||||
case gc.running <- struct{}{}:
|
||||
defer func() { <-gc.running }()
|
||||
default:
|
||||
// yield if lookahead is running.
|
||||
return
|
||||
}
|
||||
|
||||
var id peer.ID
|
||||
record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}} // empty record to reuse and avoid allocs.
|
||||
batch, err := newCyclicBatch(gc.ab.ds, defaultOpsPerCyclicBatch)
|
||||
if err != nil {
|
||||
log.Warnf("failed while creating batch to purge GC entries: %v", err)
|
||||
}
|
||||
|
||||
// This function drops an unparseable GC entry; this is for safety. It is an escape hatch in case
|
||||
// we modify the format of keys going forward: if a user runs a new version against an old DB and
|
||||
// we don't clean up unparseable entries, we'll end up accumulating garbage.
|
||||
dropInError := func(key ds.Key, err error, msg string) {
|
||||
if err != nil {
|
||||
log.Warnf("failed while %s record with GC key: %v, err: %v; deleting", msg, key, err)
|
||||
}
|
||||
if err = batch.Delete(key); err != nil {
|
||||
log.Warnf("failed to delete corrupt GC lookahead entry: %v, err: %v", key, err)
|
||||
}
|
||||
}
|
||||
|
||||
// This function drops a GC key if the entry is cleaned correctly. It may reschedule another visit
|
||||
// if the next earliest expiry falls within the current window again.
|
||||
dropOrReschedule := func(key ds.Key, ar *addrsRecord) {
|
||||
if err := batch.Delete(key); err != nil {
|
||||
log.Warnf("failed to delete lookahead entry: %v, err: %v", key, err)
|
||||
}
|
||||
|
||||
// re-add the record if it needs to be visited again in this window.
|
||||
if len(ar.Addrs) != 0 && ar.Addrs[0].Expiry <= gc.currWindowEnd {
|
||||
gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", ar.Addrs[0].Expiry, key.Name()))
|
||||
if err := batch.Put(gcKey, []byte{}); err != nil {
|
||||
log.Warnf("failed to add new GC key: %v, err: %v", gcKey, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
results, err := gc.ab.ds.Query(purgeLookaheadQuery)
|
||||
if err != nil {
|
||||
log.Warnf("failed while fetching entries to purge: %v", err)
|
||||
return
|
||||
}
|
||||
defer results.Close()
|
||||
|
||||
now := time.Now().Unix()
|
||||
|
||||
// keys: /peers/gc/addrs/<unix timestamp of next visit>/<peer ID b32>
|
||||
// values: nil
|
||||
for result := range results.Next() {
|
||||
gcKey := ds.RawKey(result.Key)
|
||||
ts, err := strconv.ParseInt(gcKey.Parent().Name(), 10, 64)
|
||||
if err != nil {
|
||||
dropInError(gcKey, err, "parsing timestamp")
|
||||
log.Warnf("failed while parsing timestamp from key: %v, err: %v", result.Key, err)
|
||||
continue
|
||||
} else if ts > now {
|
||||
// this is an ordered cursor; when we hit an entry with a timestamp beyond now, we can break.
|
||||
break
|
||||
}
|
||||
|
||||
idb32, err := b32.RawStdEncoding.DecodeString(gcKey.Name())
|
||||
if err != nil {
|
||||
dropInError(gcKey, err, "parsing peer ID")
|
||||
log.Warnf("failed while parsing b32 peer ID from key: %v, err: %v", result.Key, err)
|
||||
continue
|
||||
}
|
||||
|
||||
id, err = peer.IDFromBytes(idb32)
|
||||
if err != nil {
|
||||
dropInError(gcKey, err, "decoding peer ID")
|
||||
log.Warnf("failed while decoding peer ID from key: %v, err: %v", result.Key, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// if the record is in cache, we clean it and flush it if necessary.
|
||||
if e, ok := gc.ab.cache.Peek(id); ok {
|
||||
cached := e.(*addrsRecord)
|
||||
cached.Lock()
|
||||
if cached.clean() {
|
||||
if err = cached.flush(batch); err != nil {
|
||||
log.Warnf("failed to flush entry modified by GC for peer: &v, err: %v", id.Pretty(), err)
|
||||
}
|
||||
}
|
||||
dropOrReschedule(gcKey, cached)
|
||||
cached.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
record.Reset()
|
||||
|
||||
// otherwise, fetch it from the store, clean it and flush it.
|
||||
entryKey := addrBookBase.ChildString(gcKey.Name())
|
||||
val, err := gc.ab.ds.Get(entryKey)
|
||||
if err != nil {
|
||||
// captures all errors, including ErrNotFound.
|
||||
dropInError(gcKey, err, "fetching entry")
|
||||
continue
|
||||
}
|
||||
err = record.Unmarshal(val)
|
||||
if err != nil {
|
||||
dropInError(gcKey, err, "unmarshalling entry")
|
||||
continue
|
||||
}
|
||||
if record.clean() {
|
||||
err = record.flush(batch)
|
||||
if err != nil {
|
||||
log.Warnf("failed to flush entry modified by GC for peer: &v, err: %v", id.Pretty(), err)
|
||||
}
|
||||
}
|
||||
dropOrReschedule(gcKey, record)
|
||||
}
|
||||
|
||||
if err = batch.Commit(); err != nil {
|
||||
log.Warnf("failed to commit GC purge batch: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (gc *dsAddrBookGc) purgeStore() {
|
||||
select {
|
||||
case gc.running <- struct{}{}:
|
||||
defer func() { <-gc.running }()
|
||||
default:
|
||||
// yield if lookahead is running.
|
||||
return
|
||||
}
|
||||
|
||||
record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}} // empty record to reuse and avoid allocs.
|
||||
batch, err := newCyclicBatch(gc.ab.ds, defaultOpsPerCyclicBatch)
|
||||
if err != nil {
|
||||
log.Warnf("failed while creating batch to purge GC entries: %v", err)
|
||||
}
|
||||
|
||||
results, err := gc.ab.ds.Query(purgeStoreQuery)
|
||||
if err != nil {
|
||||
log.Warnf("failed while opening iterator: %v", err)
|
||||
return
|
||||
}
|
||||
defer results.Close()
|
||||
|
||||
// keys: /peers/addrs/<peer ID b32>
|
||||
for result := range results.Next() {
|
||||
record.Reset()
|
||||
if err = record.Unmarshal(result.Value); err != nil {
|
||||
// TODO log
|
||||
continue
|
||||
}
|
||||
|
||||
id := record.Id.ID
|
||||
if !record.clean() {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := record.flush(batch); err != nil {
|
||||
log.Warnf("failed to flush entry modified by GC for peer: &v, err: %v", id, err)
|
||||
}
|
||||
gc.ab.cache.Remove(id)
|
||||
}
|
||||
|
||||
if err = batch.Commit(); err != nil {
|
||||
log.Warnf("failed to commit GC purge batch: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// populateLookahead populates the lookahead window by scanning the entire store and picking entries whose earliest
|
||||
// expiration falls within the window period.
|
||||
//
|
||||
// Those entries are stored in the lookahead region in the store, indexed by the timestamp when they need to be
|
||||
// visited, to facilitate temporal range scans.
|
||||
func (gc *dsAddrBookGc) populateLookahead() {
|
||||
if gc.ab.opts.GCLookaheadInterval == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case gc.running <- struct{}{}:
|
||||
defer func() { <-gc.running }()
|
||||
default:
|
||||
// yield if something's running.
|
||||
return
|
||||
}
|
||||
|
||||
until := time.Now().Add(gc.ab.opts.GCLookaheadInterval).Unix()
|
||||
|
||||
var id peer.ID
|
||||
record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}}
|
||||
results, err := gc.ab.ds.Query(populateLookaheadQuery)
|
||||
if err != nil {
|
||||
log.Warnf("failed while querying to populate lookahead GC window: %v", err)
|
||||
return
|
||||
}
|
||||
defer results.Close()
|
||||
|
||||
batch, err := newCyclicBatch(gc.ab.ds, defaultOpsPerCyclicBatch)
|
||||
if err != nil {
|
||||
log.Warnf("failed while creating batch to populate lookahead GC window: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for result := range results.Next() {
|
||||
idb32 := ds.RawKey(result.Key).Name()
|
||||
k, err := b32.RawStdEncoding.DecodeString(idb32)
|
||||
if err != nil {
|
||||
log.Warnf("failed while decoding peer ID from key: %v, err: %v", result.Key, err)
|
||||
continue
|
||||
}
|
||||
if id, err = peer.IDFromBytes(k); err != nil {
|
||||
log.Warnf("failed while decoding peer ID from key: %v, err: %v", result.Key, err)
|
||||
}
|
||||
|
||||
// if the record is in cache, use the cached version.
|
||||
if e, ok := gc.ab.cache.Peek(id); ok {
|
||||
cached := e.(*addrsRecord)
|
||||
cached.RLock()
|
||||
if len(cached.Addrs) == 0 || cached.Addrs[0].Expiry > until {
|
||||
cached.RUnlock()
|
||||
continue
|
||||
}
|
||||
gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", cached.Addrs[0].Expiry, idb32))
|
||||
if err = batch.Put(gcKey, []byte{}); err != nil {
|
||||
log.Warnf("failed while inserting GC entry for peer: %v, err: %v", id.Pretty(), err)
|
||||
}
|
||||
cached.RUnlock()
|
||||
continue
|
||||
}
|
||||
|
||||
record.Reset()
|
||||
|
||||
val, err := gc.ab.ds.Get(ds.RawKey(result.Key))
|
||||
if err != nil {
|
||||
log.Warnf("failed which getting record from store for peer: %v, err: %v", id.Pretty(), err)
|
||||
continue
|
||||
}
|
||||
if err := record.Unmarshal(val); err != nil {
|
||||
log.Warnf("failed while unmarshalling record from store for peer: %v, err: %v", id.Pretty(), err)
|
||||
continue
|
||||
}
|
||||
if len(record.Addrs) > 0 && record.Addrs[0].Expiry <= until {
|
||||
gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", record.Addrs[0].Expiry, idb32))
|
||||
if err = batch.Put(gcKey, []byte{}); err != nil {
|
||||
log.Warnf("failed while inserting GC entry for peer: %v, err: %v", id.Pretty(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err = batch.Commit(); err != nil {
|
||||
log.Warnf("failed to commit GC lookahead batch: %v", err)
|
||||
}
|
||||
|
||||
gc.currWindowEnd = until
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
package pstoreds
|
||||
|
||||
// cache abstracts all methods we access from ARCCache, to enable alternate
|
||||
// implementations such as a no-op one.
|
||||
type cache interface {
|
||||
Get(key interface{}) (value interface{}, ok bool)
|
||||
Add(key, value interface{})
|
||||
Remove(key interface{})
|
||||
Contains(key interface{}) bool
|
||||
Peek(key interface{}) (value interface{}, ok bool)
|
||||
Keys() []interface{}
|
||||
}
|
||||
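The concrete implementation behind this interface when the cache is enabled is hashicorp's ARC cache (see lru.NewARC in NewAddrBook above). A hedged sketch of that wiring, assuming the golang-lru import used there:

arc, _ := lru.NewARC(1024) // error handling elided in this sketch
var c cache = arc          // *lru.ARCCache satisfies the cache interface above
c.Add("k", "v")
if v, ok := c.Get("k"); ok {
	_ = v
}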
|
||||
// noopCache is a dummy implementation that's used when the cache is disabled.
|
||||
type noopCache struct {
|
||||
}
|
||||
|
||||
var _ cache = (*noopCache)(nil)
|
||||
|
||||
func (*noopCache) Get(key interface{}) (value interface{}, ok bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (*noopCache) Add(key, value interface{}) {
|
||||
}
|
||||
|
||||
func (*noopCache) Remove(key interface{}) {
|
||||
}
|
||||
|
||||
func (*noopCache) Contains(key interface{}) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (*noopCache) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (*noopCache) Keys() (keys []interface{}) {
|
||||
return keys
|
||||
}
|
76
vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/cyclic_batch.go
generated
vendored
Normal file
|
@ -0,0 +1,76 @@
|
|||
package pstoreds
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
)
|
||||
|
||||
// how many operations are queued in a cyclic batch before we flush it.
|
||||
var defaultOpsPerCyclicBatch = 20
|
||||
|
||||
// cyclicBatch buffers ds write operations and automatically flushes them after defaultOpsPerCyclicBatch (20) have been
|
||||
// queued. An explicit `Commit()` closes this cyclic batch, erroring all further operations.
|
||||
//
|
||||
// It is similar to go-ds autobatch, but it's driven by an actual Batch facility offered by the
|
||||
// ds.
|
||||
type cyclicBatch struct {
|
||||
threshold int
|
||||
ds.Batch
|
||||
ds ds.Batching
|
||||
pending int
|
||||
}
|
||||
|
||||
func newCyclicBatch(ds ds.Batching, threshold int) (ds.Batch, error) {
|
||||
batch, err := ds.Batch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cyclicBatch{Batch: batch, ds: ds}, nil
|
||||
}
|
||||
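A hedged usage sketch (the store and key are illustrative): writes are queued through the cyclic batch and flushed every `threshold` operations, with a final explicit Commit:

func exampleCyclicWrites(store ds.Batching) error {
	batch, err := newCyclicBatch(store, defaultOpsPerCyclicBatch)
	if err != nil {
		return err
	}
	if err := batch.Put(ds.NewKey("/example/key"), []byte("value")); err != nil {
		return err
	}
	// ... further Put/Delete calls; every threshold-th op commits and renews the batch
	return batch.Commit() // closes the batch; later operations will error
}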
|
||||
func (cb *cyclicBatch) cycle() (err error) {
|
||||
if cb.Batch == nil {
|
||||
return errors.New("cyclic batch is closed")
|
||||
}
|
||||
if cb.pending < cb.threshold {
|
||||
// we haven't reached the threshold yet.
|
||||
return nil
|
||||
}
|
||||
// commit and renew the batch.
|
||||
if err = cb.Batch.Commit(); err != nil {
|
||||
return errors.Wrap(err, "failed while committing cyclic batch")
|
||||
}
|
||||
if cb.Batch, err = cb.ds.Batch(); err != nil {
|
||||
return errors.Wrap(err, "failed while renewing cyclic batch")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cb *cyclicBatch) Put(key ds.Key, val []byte) error {
|
||||
if err := cb.cycle(); err != nil {
|
||||
return err
|
||||
}
|
||||
cb.pending++
|
||||
return cb.Batch.Put(key, val)
|
||||
}
|
||||
|
||||
func (cb *cyclicBatch) Delete(key ds.Key) error {
|
||||
if err := cb.cycle(); err != nil {
|
||||
return err
|
||||
}
|
||||
cb.pending++
|
||||
return cb.Batch.Delete(key)
|
||||
}
|
||||
|
||||
func (cb *cyclicBatch) Commit() error {
|
||||
if cb.Batch == nil {
|
||||
return errors.New("cyclic batch is closed")
|
||||
}
|
||||
if err := cb.Batch.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
cb.pending = 0
|
||||
cb.Batch = nil
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,134 @@
|
|||
package pstoreds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
base32 "github.com/multiformats/go-base32"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
query "github.com/ipfs/go-datastore/query"
|
||||
|
||||
ic "github.com/libp2p/go-libp2p-core/crypto"
|
||||
peer "github.com/libp2p/go-libp2p-core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p-core/peerstore"
|
||||
)
|
||||
|
||||
// Public and private keys are stored under the following db key pattern:
|
||||
// /peers/keys/<b32 peer id no padding>/{pub, priv}
|
||||
var (
|
||||
kbBase = ds.NewKey("/peers/keys")
|
||||
pubSuffix = ds.NewKey("/pub")
|
||||
privSuffix = ds.NewKey("/priv")
|
||||
)
|
||||
|
||||
type dsKeyBook struct {
|
||||
ds ds.Datastore
|
||||
}
|
||||
|
||||
var _ pstore.KeyBook = (*dsKeyBook)(nil)
|
||||
|
||||
func NewKeyBook(_ context.Context, store ds.Datastore, _ Options) (*dsKeyBook, error) {
|
||||
return &dsKeyBook{store}, nil
|
||||
}
|
||||
|
||||
func (kb *dsKeyBook) PubKey(p peer.ID) ic.PubKey {
|
||||
key := kbBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).Child(pubSuffix)
|
||||
|
||||
var pk ic.PubKey
|
||||
if value, err := kb.ds.Get(key); err == nil {
|
||||
pk, err = ic.UnmarshalPublicKey(value)
|
||||
if err != nil {
|
||||
log.Errorf("error when unmarshalling pubkey from datastore for peer %s: %s\n", p.Pretty(), err)
|
||||
}
|
||||
} else if err == ds.ErrNotFound {
|
||||
pk, err = p.ExtractPublicKey()
|
||||
switch err {
|
||||
case nil:
|
||||
case peer.ErrNoPublicKey:
|
||||
return nil
|
||||
default:
|
||||
log.Errorf("error when extracting pubkey from peer ID for peer %s: %s\n", p.Pretty(), err)
|
||||
return nil
|
||||
}
|
||||
pkb, err := pk.Bytes()
|
||||
if err != nil {
|
||||
log.Errorf("error when turning extracted pubkey into bytes for peer %s: %s\n", p.Pretty(), err)
|
||||
return nil
|
||||
}
|
||||
err = kb.ds.Put(key, pkb)
|
||||
if err != nil {
|
||||
log.Errorf("error when adding extracted pubkey to peerstore for peer %s: %s\n", p.Pretty(), err)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
log.Errorf("error when fetching pubkey from datastore for peer %s: %s\n", p.Pretty(), err)
|
||||
}
|
||||
|
||||
return pk
|
||||
}
|
||||
|
||||
func (kb *dsKeyBook) AddPubKey(p peer.ID, pk ic.PubKey) error {
|
||||
// check it's correct.
|
||||
if !p.MatchesPublicKey(pk) {
|
||||
return errors.New("peer ID does not match public key")
|
||||
}
|
||||
|
||||
key := kbBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).Child(pubSuffix)
|
||||
val, err := pk.Bytes()
|
||||
if err != nil {
|
||||
log.Errorf("error while converting pubkey byte string for peer %s: %s\n", p.Pretty(), err)
|
||||
return err
|
||||
}
|
||||
err = kb.ds.Put(key, val)
|
||||
if err != nil {
|
||||
log.Errorf("error while updating pubkey in datastore for peer %s: %s\n", p.Pretty(), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (kb *dsKeyBook) PrivKey(p peer.ID) ic.PrivKey {
|
||||
key := kbBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).Child(privSuffix)
|
||||
value, err := kb.ds.Get(key)
|
||||
if err != nil {
|
||||
log.Errorf("error while fetching privkey from datastore for peer %s: %s\n", p.Pretty(), err)
|
||||
return nil
|
||||
}
|
||||
sk, err := ic.UnmarshalPrivateKey(value)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return sk
|
||||
}
|
||||
|
||||
func (kb *dsKeyBook) AddPrivKey(p peer.ID, sk ic.PrivKey) error {
|
||||
if sk == nil {
|
||||
return errors.New("private key is nil")
|
||||
}
|
||||
// check it's correct.
|
||||
if !p.MatchesPrivateKey(sk) {
|
||||
return errors.New("peer ID does not match private key")
|
||||
}
|
||||
|
||||
key := kbBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).Child(privSuffix)
|
||||
val, err := sk.Bytes()
|
||||
if err != nil {
|
||||
log.Errorf("error while converting privkey byte string for peer %s: %s\n", p.Pretty(), err)
|
||||
return err
|
||||
}
|
||||
err = kb.ds.Put(key, val)
|
||||
if err != nil {
|
||||
log.Errorf("error while updating privkey in datastore for peer %s: %s\n", p.Pretty(), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (kb *dsKeyBook) PeersWithKeys() peer.IDSlice {
|
||||
ids, err := uniquePeerIds(kb.ds, kbBase, func(result query.Result) string {
|
||||
return ds.RawKey(result.Key).Parent().Name()
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("error while retrieving peers with keys: %v", err)
|
||||
}
|
||||
return ids
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
package pstoreds
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/gob"
|
||||
|
||||
base32 "github.com/multiformats/go-base32"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
|
||||
pool "github.com/libp2p/go-buffer-pool"
|
||||
peer "github.com/libp2p/go-libp2p-core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p-core/peerstore"
|
||||
)
|
||||
|
||||
// Metadata is stored under the following db key pattern:
|
||||
// /peers/metadata/<b32 peer id no padding>/<key>
|
||||
var pmBase = ds.NewKey("/peers/metadata")
|
||||
|
||||
type dsPeerMetadata struct {
|
||||
ds ds.Datastore
|
||||
}
|
||||
|
||||
var _ pstore.PeerMetadata = (*dsPeerMetadata)(nil)
|
||||
|
||||
func init() {
|
||||
// Gob registers basic types by default.
|
||||
//
|
||||
// Register complex types used by the peerstore itself.
|
||||
gob.Register(make(map[string]struct{}))
|
||||
}
|
||||
|
||||
// NewPeerMetadata creates a metadata store backed by a persistent db. It uses gob for serialisation.
|
||||
//
|
||||
// See `init()` to learn which types are registered by default. Modules wishing to store
|
||||
// values of other types will need to `gob.Register()` them explicitly, or else callers
|
||||
// will receive runtime errors.
|
||||
func NewPeerMetadata(_ context.Context, store ds.Datastore, _ Options) (*dsPeerMetadata, error) {
|
||||
return &dsPeerMetadata{store}, nil
|
||||
}
|
||||
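A hedged sketch of what the doc comment above implies for callers storing custom types (the type and key below are hypothetical):

func exampleMetadata(pm *dsPeerMetadata, p peer.ID) error {
	type connNote struct{ Reason string } // hypothetical caller-defined type
	gob.Register(connNote{})              // must be registered before Put/Get
	if err := pm.Put(p, "note", connNote{Reason: "manual"}); err != nil {
		return err
	}
	v, err := pm.Get(p, "note")
	if err != nil {
		return err
	}
	_ = v.(connNote) // comes back as the registered concrete type
	return nil
}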
|
||||
func (pm *dsPeerMetadata) Get(p peer.ID, key string) (interface{}, error) {
|
||||
if err := p.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key)
|
||||
value, err := pm.ds.Get(k)
|
||||
if err != nil {
|
||||
if err == ds.ErrNotFound {
|
||||
err = pstore.ErrNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var res interface{}
|
||||
if err := gob.NewDecoder(bytes.NewReader(value)).Decode(&res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (pm *dsPeerMetadata) Put(p peer.ID, key string, val interface{}) error {
|
||||
if err := p.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key)
|
||||
var buf pool.Buffer
|
||||
if err := gob.NewEncoder(&buf).Encode(&val); err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.ds.Put(k, buf.Bytes())
|
||||
}
|
166
vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/peerstore.go
generated
vendored
Normal file
|
@ -0,0 +1,166 @@
|
|||
package pstoreds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
base32 "github.com/multiformats/go-base32"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
query "github.com/ipfs/go-datastore/query"
|
||||
|
||||
peer "github.com/libp2p/go-libp2p-core/peer"
|
||||
peerstore "github.com/libp2p/go-libp2p-core/peerstore"
|
||||
pstore "github.com/libp2p/go-libp2p-peerstore"
|
||||
)
|
||||
|
||||
// Configuration object for the peerstore.
|
||||
type Options struct {
|
||||
// The size of the in-memory cache. A value of 0 or lower disables the cache.
|
||||
CacheSize uint
|
||||
|
||||
// Sweep interval to purge expired addresses from the datastore. If this is a zero value, GC will not run
|
||||
// automatically, but it'll be available on demand via explicit calls.
|
||||
GCPurgeInterval time.Duration
|
||||
|
||||
// Interval to renew the GC lookahead window. If this is a zero value, lookahead will be disabled and we'll
|
||||
// traverse the entire datastore for every purge cycle.
|
||||
GCLookaheadInterval time.Duration
|
||||
|
||||
// Initial delay before GC processes start. Intended to give the system breathing room to fully boot
|
||||
// before starting GC.
|
||||
GCInitialDelay time.Duration
|
||||
}
|
||||
|
||||
// DefaultOpts returns the default options for a persistent peerstore, with the full-purge GC algorithm:
|
||||
//
|
||||
// * Cache size: 1024.
|
||||
// * GC purge interval: 2 hours.
|
||||
// * GC lookahead interval: disabled.
|
||||
// * GC initial delay: 60 seconds.
|
||||
func DefaultOpts() Options {
|
||||
return Options{
|
||||
CacheSize: 1024,
|
||||
GCPurgeInterval: 2 * time.Hour,
|
||||
GCLookaheadInterval: 0,
|
||||
GCInitialDelay: 60 * time.Second,
|
||||
}
|
||||
}
|
||||
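A hedged sketch of customising these options (values illustrative); note the constraint enforced by newAddressBookGc that a non-zero lookahead interval must exceed the purge interval:

opts := DefaultOpts()
opts.CacheSize = 4096
opts.GCPurgeInterval = time.Hour
opts.GCLookaheadInterval = 12 * time.Hour // enables the lookahead GC algorithm
// opts is then passed to NewPeerstore / NewAddrBook.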
|
||||
type pstoreds struct {
|
||||
peerstore.Metrics
|
||||
|
||||
dsKeyBook
|
||||
dsAddrBook
|
||||
dsProtoBook
|
||||
dsPeerMetadata
|
||||
}
|
||||
|
||||
// NewPeerstore creates a peerstore backed by the provided persistent datastore.
|
||||
func NewPeerstore(ctx context.Context, store ds.Batching, opts Options) (*pstoreds, error) {
|
||||
addrBook, err := NewAddrBook(ctx, store, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keyBook, err := NewKeyBook(ctx, store, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
peerMetadata, err := NewPeerMetadata(ctx, store, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
protoBook := NewProtoBook(peerMetadata)
|
||||
|
||||
ps := &pstoreds{
|
||||
Metrics: pstore.NewMetrics(),
|
||||
dsKeyBook: *keyBook,
|
||||
dsAddrBook: *addrBook,
|
||||
dsPeerMetadata: *peerMetadata,
|
||||
dsProtoBook: *protoBook,
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// uniquePeerIds extracts and returns unique peer IDs from database keys.
|
||||
func uniquePeerIds(ds ds.Datastore, prefix ds.Key, extractor func(result query.Result) string) (peer.IDSlice, error) {
|
||||
var (
|
||||
q = query.Query{Prefix: prefix.String(), KeysOnly: true}
|
||||
results query.Results
|
||||
err error
|
||||
)
|
||||
|
||||
if results, err = ds.Query(q); err != nil {
|
||||
log.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer results.Close()
|
||||
|
||||
idset := make(map[string]struct{})
|
||||
for result := range results.Next() {
|
||||
k := extractor(result)
|
||||
idset[k] = struct{}{}
|
||||
}
|
||||
|
||||
if len(idset) == 0 {
|
||||
return peer.IDSlice{}, nil
|
||||
}
|
||||
|
||||
ids := make(peer.IDSlice, 0, len(idset))
|
||||
for id := range idset {
|
||||
pid, _ := base32.RawStdEncoding.DecodeString(id)
|
||||
id, _ := peer.IDFromBytes(pid)
|
||||
ids = append(ids, id)
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
func (ps *pstoreds) Close() (err error) {
|
||||
var errs []error
|
||||
weakClose := func(name string, c interface{}) {
|
||||
if cl, ok := c.(io.Closer); ok {
|
||||
if err = cl.Close(); err != nil {
|
||||
errs = append(errs, fmt.Errorf("%s error: %s", name, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
weakClose("keybook", ps.dsKeyBook)
|
||||
weakClose("addressbook", ps.dsAddrBook)
|
||||
weakClose("protobook", ps.dsProtoBook)
|
||||
weakClose("peermetadata", ps.dsPeerMetadata)
|
||||
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("failed while closing peerstore; err(s): %q", errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ps *pstoreds) Peers() peer.IDSlice {
|
||||
set := map[peer.ID]struct{}{}
|
||||
for _, p := range ps.PeersWithKeys() {
|
||||
set[p] = struct{}{}
|
||||
}
|
||||
for _, p := range ps.PeersWithAddrs() {
|
||||
set[p] = struct{}{}
|
||||
}
|
||||
|
||||
pps := make(peer.IDSlice, 0, len(set))
|
||||
for p := range set {
|
||||
pps = append(pps, p)
|
||||
}
|
||||
return pps
|
||||
}
|
||||
|
||||
func (ps *pstoreds) PeerInfo(p peer.ID) peer.AddrInfo {
|
||||
return peer.AddrInfo{
|
||||
ID: p,
|
||||
Addrs: ps.dsAddrBook.Addrs(p),
|
||||
}
|
||||
}
|
188
vendor/github.com/libp2p/go-libp2p-peerstore/pstoreds/protobook.go
generated
vendored
Normal file
|
@ -0,0 +1,188 @@
|
|||
package pstoreds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
peer "github.com/libp2p/go-libp2p-core/peer"
|
||||
|
||||
pstore "github.com/libp2p/go-libp2p-core/peerstore"
|
||||
)
|
||||
|
||||
type protoSegment struct {
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
type protoSegments [256]*protoSegment
|
||||
|
||||
func (s *protoSegments) get(p peer.ID) *protoSegment {
|
||||
return s[byte(p[len(p)-1])]
|
||||
}
|
||||
|
||||
type dsProtoBook struct {
|
||||
segments protoSegments
|
||||
meta pstore.PeerMetadata
|
||||
}
|
||||
|
||||
var _ pstore.ProtoBook = (*dsProtoBook)(nil)
|
||||
|
||||
func NewProtoBook(meta pstore.PeerMetadata) *dsProtoBook {
|
||||
return &dsProtoBook{
|
||||
meta: meta,
|
||||
segments: func() (ret protoSegments) {
|
||||
for i := range ret {
|
||||
ret[i] = &protoSegment{}
|
||||
}
|
||||
return ret
|
||||
}(),
|
||||
}
|
||||
}
|
||||
|
||||
func (pb *dsProtoBook) SetProtocols(p peer.ID, protos ...string) error {
|
||||
if err := p.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := pb.segments.get(p)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
protomap := make(map[string]struct{}, len(protos))
|
||||
for _, proto := range protos {
|
||||
protomap[proto] = struct{}{}
|
||||
}
|
||||
|
||||
return pb.meta.Put(p, "protocols", protomap)
|
||||
}
|
||||
|
||||
func (pb *dsProtoBook) AddProtocols(p peer.ID, protos ...string) error {
|
||||
if err := p.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := pb.segments.get(p)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
pmap, err := pb.getProtocolMap(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, proto := range protos {
|
||||
pmap[proto] = struct{}{}
|
||||
}
|
||||
|
||||
return pb.meta.Put(p, "protocols", pmap)
|
||||
}
|
||||
|
||||
func (pb *dsProtoBook) GetProtocols(p peer.ID) ([]string, error) {
|
||||
if err := p.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := pb.segments.get(p)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
pmap, err := pb.getProtocolMap(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := make([]string, 0, len(pmap))
|
||||
for proto := range pmap {
|
||||
res = append(res, proto)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (pb *dsProtoBook) SupportsProtocols(p peer.ID, protos ...string) ([]string, error) {
|
||||
if err := p.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := pb.segments.get(p)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
pmap, err := pb.getProtocolMap(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := make([]string, 0, len(protos))
|
||||
for _, proto := range protos {
|
||||
if _, ok := pmap[proto]; ok {
|
||||
res = append(res, proto)
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (pb *dsProtoBook) FirstSupportedProtocol(p peer.ID, protos ...string) (string, error) {
|
||||
if err := p.Validate(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
s := pb.segments.get(p)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
pmap, err := pb.getProtocolMap(p)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, proto := range protos {
|
||||
if _, ok := pmap[proto]; ok {
|
||||
return proto, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (pb *dsProtoBook) RemoveProtocols(p peer.ID, protos ...string) error {
|
||||
if err := p.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := pb.segments.get(p)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
pmap, err := pb.getProtocolMap(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(pmap) == 0 {
|
||||
// nothing to do.
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, proto := range protos {
|
||||
delete(pmap, proto)
|
||||
}
|
||||
|
||||
return pb.meta.Put(p, "protocols", pmap)
|
||||
}
|
||||
|
||||
func (pb *dsProtoBook) getProtocolMap(p peer.ID) (map[string]struct{}, error) {
|
||||
iprotomap, err := pb.meta.Get(p, "protocols")
|
||||
switch err {
|
||||
default:
|
||||
return nil, err
|
||||
case pstore.ErrNotFound:
|
||||
return make(map[string]struct{}), nil
|
||||
case nil:
|
||||
cast, ok := iprotomap.(map[string]struct{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("stored protocol set was not a map")
|
||||
}
|
||||
|
||||
return cast, nil
|
||||
}
|
||||
}
|
|
@ -132,8 +132,10 @@ github.com/go-playground/universal-translator
|
|||
# github.com/go-stack/stack v1.8.0
|
||||
github.com/go-stack/stack
|
||||
# github.com/gogo/protobuf v1.3.2
|
||||
github.com/gogo/protobuf/gogoproto
|
||||
github.com/gogo/protobuf/io
|
||||
github.com/gogo/protobuf/proto
|
||||
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
|
||||
# github.com/golang-migrate/migrate/v4 v4.8.0
|
||||
github.com/golang-migrate/migrate/v4
|
||||
github.com/golang-migrate/migrate/v4/database
|
||||
|
@ -183,6 +185,11 @@ github.com/huin/goupnp/soap
|
|||
github.com/huin/goupnp/ssdp
|
||||
# github.com/ipfs/go-cid v0.0.7
|
||||
github.com/ipfs/go-cid
|
||||
# github.com/ipfs/go-datastore v0.4.4
|
||||
github.com/ipfs/go-datastore
|
||||
github.com/ipfs/go-datastore/query
|
||||
# github.com/ipfs/go-ds-sql v0.2.0
|
||||
github.com/ipfs/go-ds-sql
|
||||
# github.com/ipfs/go-ipfs-util v0.0.2
|
||||
github.com/ipfs/go-ipfs-util
|
||||
# github.com/ipfs/go-log v1.0.4
|
||||
|
@ -271,6 +278,7 @@ github.com/libp2p/go-libp2p-core/routing
|
|||
github.com/libp2p/go-libp2p-core/sec
|
||||
github.com/libp2p/go-libp2p-core/sec/insecure
|
||||
github.com/libp2p/go-libp2p-core/sec/insecure/pb
|
||||
github.com/libp2p/go-libp2p-core/test
|
||||
github.com/libp2p/go-libp2p-core/transport
|
||||
# github.com/libp2p/go-libp2p-discovery v0.5.0
|
||||
github.com/libp2p/go-libp2p-discovery
|
||||
|
@ -286,6 +294,8 @@ github.com/libp2p/go-libp2p-noise/pb
|
|||
# github.com/libp2p/go-libp2p-peerstore v0.2.6
|
||||
github.com/libp2p/go-libp2p-peerstore
|
||||
github.com/libp2p/go-libp2p-peerstore/addr
|
||||
github.com/libp2p/go-libp2p-peerstore/pb
|
||||
github.com/libp2p/go-libp2p-peerstore/pstoreds
|
||||
github.com/libp2p/go-libp2p-peerstore/pstoremem
|
||||
# github.com/libp2p/go-libp2p-pnet v0.2.0
|
||||
github.com/libp2p/go-libp2p-pnet
|
||||
|
|
|
@ -27,7 +27,7 @@ import (
|
|||
)
|
||||
|
||||
func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) {
|
||||
w, err := New("", nil, nil)
|
||||
w, err := New("", nil, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating WakuV2 client: %v", err)
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ type Config struct {
|
|||
SoftBlacklistedPeerIDs []string `toml:",omitempty"`
|
||||
Host string `toml:",omitempty"`
|
||||
Port int `toml:",omitempty"`
|
||||
PersistPeers bool `toml:",omitempty"`
|
||||
PeerExchange bool `toml:",omitempty"`
|
||||
KeepAliveInterval int `toml:",omitempty"`
|
||||
LightClient bool `toml:",omitempty"`
|
||||
|
|
|
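For illustration, a hedged sketch of how an embedder might enable peer persistence with this flag; the package alias and variables are assumed, and appdb is the application's *sql.DB passed through the new parameter added to New further below:

cfg := &wakuv2.Config{
	PersistPeers:      true,
	KeepAliveInterval: 10,
}
// nodeKey, logger and appdb are assumed to exist in the caller
w, err := wakuv2.New(nodeKey, cfg, logger, appdb)
if err != nil {
	panic(err) // sketch only
}
_ = w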
@ -0,0 +1,93 @@
|
|||
package persistence
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Queries are the sqlite queries for a given table.
|
||||
type Queries struct {
|
||||
deleteQuery string
|
||||
existsQuery string
|
||||
getQuery string
|
||||
putQuery string
|
||||
queryQuery string
|
||||
prefixQuery string
|
||||
limitQuery string
|
||||
offsetQuery string
|
||||
getSizeQuery string
|
||||
}
|
||||
|
||||
// NewQueries creates a new set of queries for the passed table
|
||||
func NewQueries(tbl string, db *sql.DB) (*Queries, error) {
|
||||
err := CreateTable(db, tbl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Queries{
|
||||
deleteQuery: fmt.Sprintf("DELETE FROM %s WHERE key = $1", tbl),
|
||||
existsQuery: fmt.Sprintf("SELECT exists(SELECT 1 FROM %s WHERE key=$1)", tbl),
|
||||
getQuery: fmt.Sprintf("SELECT data FROM %s WHERE key = $1", tbl),
|
||||
putQuery: fmt.Sprintf("INSERT INTO %s (key, data) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET data = $2", tbl),
|
||||
queryQuery: fmt.Sprintf("SELECT key, data FROM %s", tbl),
|
||||
prefixQuery: ` WHERE key LIKE '%s%%' ORDER BY key`,
|
||||
limitQuery: ` LIMIT %d`,
|
||||
offsetQuery: ` OFFSET %d`,
|
||||
getSizeQuery: fmt.Sprintf("SELECT length(data) FROM %s WHERE key = $1", tbl),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Delete returns the query for deleting a row.
|
||||
func (q Queries) Delete() string {
|
||||
return q.deleteQuery
|
||||
}
|
||||
|
||||
// Exists returns the query for determining if a row exists.
|
||||
func (q Queries) Exists() string {
|
||||
return q.existsQuery
|
||||
}
|
||||
|
||||
// Get returns the query for getting a row.
|
||||
func (q Queries) Get() string {
|
||||
return q.getQuery
|
||||
}
|
||||
|
||||
// Put returns the query for putting a row.
|
||||
func (q Queries) Put() string {
|
||||
return q.putQuery
|
||||
}
|
||||
|
||||
// Query returns the query for getting multiple rows.
|
||||
func (q Queries) Query() string {
|
||||
return q.queryQuery
|
||||
}
|
||||
|
||||
// Prefix returns the query fragment for getting a rows with a key prefix.
|
||||
func (q Queries) Prefix() string {
|
||||
return q.prefixQuery
|
||||
}
|
||||
|
||||
// Limit returns the query fragment for limiting results.
|
||||
func (q Queries) Limit() string {
|
||||
return q.limitQuery
|
||||
}
|
||||
|
||||
// Offset returns the query fragment for returning rows from a given offset.
|
||||
func (q Queries) Offset() string {
|
||||
return q.offsetQuery
|
||||
}
|
||||
|
||||
// GetSize returns the query for determining the size of a value.
|
||||
func (q Queries) GetSize() string {
|
||||
return q.getSizeQuery
|
||||
}
|
||||
|
||||
// CreateTable creates the table that will persist the peers
|
||||
func CreateTable(db *sql.DB, tableName string) error {
|
||||
sqlStmt := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (key TEXT NOT NULL UNIQUE, data BYTEA);", tableName)
|
||||
_, err := db.Exec(sqlStmt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
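Roughly how go-ds-sql composes these fragments when serving datastore queries (hedged sketch; the prefix, limit and offset values are illustrative):

q, err := NewQueries("peerstore", db) // db is an open *sql.DB
if err != nil {
	panic(err) // sketch only
}
stmt := q.Query() +
	fmt.Sprintf(q.Prefix(), "/peers/addrs") + // WHERE key LIKE '/peers/addrs%' ORDER BY key
	fmt.Sprintf(q.Limit(), 10) +
	fmt.Sprintf(q.Offset(), 0)
rows, err := db.Query(stmt)
if err != nil {
	panic(err)
}
defer rows.Close()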
@ -22,6 +22,7 @@ import (
|
|||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/sha256"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
|
@ -30,6 +31,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"github.com/libp2p/go-libp2p-peerstore/pstoreds"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
@ -37,6 +39,8 @@ import (
|
|||
mapset "github.com/deckarep/golang-set"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
|
||||
dssql "github.com/ipfs/go-ds-sql"
|
||||
|
||||
gethcommon "github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
|
@ -59,6 +63,7 @@ import (
|
|||
"github.com/status-im/status-go/eth-node/types"
|
||||
"github.com/status-im/status-go/signal"
|
||||
"github.com/status-im/status-go/wakuv2/common"
|
||||
"github.com/status-im/status-go/wakuv2/persistence"
|
||||
|
||||
libp2pdisc "github.com/libp2p/go-libp2p-core/discovery"
|
||||
node "github.com/status-im/go-waku/waku/v2/node"
|
||||
|
@ -115,7 +120,7 @@ type Waku struct {
|
|||
}
|
||||
|
||||
// New creates a WakuV2 client ready to communicate through the LibP2P network.
|
||||
func New(nodeKey string, cfg *Config, logger *zap.Logger) (*Waku, error) {
|
||||
func New(nodeKey string, cfg *Config, logger *zap.Logger, appdb *sql.DB) (*Waku, error) {
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
@ -165,15 +170,39 @@ func New(nodeKey string, cfg *Config, logger *zap.Logger) (*Waku, error) {
|
|||
return nil, fmt.Errorf("failed to setup the network interface: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
connStatusChan := make(chan node.ConnStatus, 100)
|
||||
|
||||
if cfg.KeepAliveInterval == 0 {
|
||||
cfg.KeepAliveInterval = DefaultConfig.KeepAliveInterval
|
||||
}
|
||||
|
||||
libp2pOpts := node.DefaultLibP2POptions
|
||||
libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(waku.bandwidthCounter))
|
||||
|
||||
if cfg.PersistPeers {
|
||||
if appdb == nil {
|
||||
return nil, fmt.Errorf("a db connection must be provided in order to persist the peers")
|
||||
}
|
||||
|
||||
// Create persistent peerstore
|
||||
queries, err := persistence.NewQueries("peerstore", appdb)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to setup peerstore table: %v", err)
|
||||
}
|
||||
datastore := dssql.NewDatastore(appdb, queries)
|
||||
opts := pstoreds.DefaultOpts()
|
||||
peerStore, err := pstoreds.NewPeerstore(ctx, datastore, opts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to setup peerstore: %v", err)
|
||||
}
|
||||
libp2pOpts = append(libp2pOpts, libp2p.Peerstore(peerStore))
|
||||
}
|
||||
|
||||
opts := []node.WakuNodeOption{
|
||||
node.WithLibP2POptions(
|
||||
libp2p.BandwidthReporter(waku.bandwidthCounter),
|
||||
libp2pOpts...,
|
||||
),
|
||||
node.WithPrivateKey(privateKey),
|
||||
node.WithHostAddress([]net.Addr{hostAddr}),
|
||||
|
@ -202,7 +231,7 @@ func New(nodeKey string, cfg *Config, logger *zap.Logger) (*Waku, error) {
|
|||
opts = append(opts, node.WithWakuRelay(relayOpts...))
|
||||
}
|
||||
|
||||
if waku.node, err = node.New(context.Background(), opts...); err != nil {
|
||||
if waku.node, err = node.New(ctx, opts...); err != nil {
|
||||
return nil, fmt.Errorf("failed to create a go-waku node: %v", err)
|
||||
}
|
||||
|
||||
|
|