Merge Consul OSS branch master at commit b3541c4f34

R.B. Boyer 2019-07-26 10:22:56 -05:00
commit c6c4a2251a
114 changed files with 4991 additions and 1270 deletions

View File

@ -60,6 +60,29 @@ jobs:
environment:
<<: *ENVIRONMENT
# checks vendor directory is correct
check-vendor:
docker:
- image: *GOLANG_IMAGE
environment:
<<: *ENVIRONMENT
steps:
- checkout
- restore_cache:
keys:
- consul-modcache-v1-{{ checksum "go.mod" }}
- run:
command: make update-vendor
- save_cache:
key: consul-modcache-v1-{{ checksum "go.mod" }}
paths:
- /go/pkg/mod
- run: |
if ! git diff --exit-code; then
echo "Git directory has vendor changes"
exit 1
fi
go-test:
docker:
- image: *GOLANG_IMAGE
@ -202,6 +225,21 @@ jobs:
exit 1
fi
# upload dev docker image
dev-upload-docker:
docker:
- image: circleci/golang:latest # use a circleci image so the attach_workspace step works (has ca-certs installed)
environment:
<<: *ENVIRONMENT
steps:
- checkout
# get consul binary
- attach_workspace:
at: bin/
- setup_remote_docker:
docker_layer_caching: true
- run: make ci.dev-docker
# Nomad 0.8 builds on go1.10
# Run integration tests on nomad/v0.8.7
nomad-integration-0_8:
@ -574,11 +612,13 @@ workflows:
- release/1-6
go-tests:
jobs:
- check-vendor
- lint-consul-retry
- go-fmt-and-vet
- go-fmt-and-vet:
requires:
- check-vendor
- dev-build:
requires:
- lint-consul-retry
- go-fmt-and-vet
- go-test: &go-test
requires:
@ -592,13 +632,16 @@ workflows:
test-integrations:
jobs:
- dev-build
- dev-upload-s3:
- dev-upload-s3: &dev-upload
requires:
- dev-build
filters:
branches:
ignore:
- /^pull\/.*$/ # only push dev builds from non-forks
- dev-upload-docker:
<<: *dev-upload
context: consul-ci
- nomad-integration-master:
requires:
- dev-build

View File

@ -6,8 +6,6 @@ FEATURES:
IMPROVEMENTS:
* raft: allow trailing logs to be configured as an escape hatch for extreme load that prevents followers catching up with leader [[GH-6186](https://github.com/hashicorp/consul/pull/6186)]
* agent: added configurable limit for log files to be rotated [[GH-5831](https://github.com/hashicorp/consul/pull/5831)]
* agent: health checks: change long timeout behavior to use the user-configured `timeout` value [[GH-6094](https://github.com/hashicorp/consul/pull/6094)]
* api: Update filtering language to include substring and regular expression matching on string values [[GH-6190](https://github.com/hashicorp/consul/pull/6190)]
* api: Display allowed HTTP CIDR information nicely [[GH-6029](https://github.com/hashicorp/consul/pull/6029)]
@ -16,10 +14,6 @@ IMPROVEMENTS:
* connect: change router syntax for matching query parameters to resemble the syntax for matching paths and headers for consistency. [[GH-6163](https://github.com/hashicorp/consul/issues/6163)]
* connect: allow L7 routers to match on http methods [[GH-6164](https://github.com/hashicorp/consul/issues/6164)]
BUG FIXES:
* agent: avoid reverting any check updates that occur while a service is being added or the config is reloaded [[GH-6144](https://github.com/hashicorp/consul/issues/6144)]
## 1.6.0-beta2 (July 15, 2019)
IMPROVEMENTS:
@ -49,6 +43,20 @@ BUG FIXES:
* autopilot: update to also remove failed nodes from WAN gossip pool [[GH-6028](https://github.com/hashicorp/consul/pull/6028)]
## 1.5.3 (July 25, 2019)
IMPROVEMENTS:
* raft: allow trailing logs to be configured as an escape hatch for extreme load that prevents followers catching up with leader [[GH-6186](https://github.com/hashicorp/consul/pull/6186)]
* raft: added raft log chunking capabilities to allow for storing larger KV entries [[GH-6172](https://github.com/hashicorp/consul/pull/6172)]
* agent: added configurable limit for log files to be rotated [[GH-5831](https://github.com/hashicorp/consul/pull/5831)]
* api: The v1/status endpoints can now be forwarded to remote datacenters [[GH-6198](https://github.com/hashicorp/consul/pull/6198)]
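To illustrate the status forwarding noted in the last improvement above, here is a minimal sketch of querying a remote datacenter's Raft leader through the local agent's HTTP API. It assumes an agent on 127.0.0.1:8500 and a federated datacenter named dc2; both are placeholders, not part of this change.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// With this change the local agent forwards Status.Leader to the
	// datacenter named in the dc query parameter ("dc2" is hypothetical).
	resp, err := http.Get("http://127.0.0.1:8500/v1/status/leader?dc=dc2")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("leader of dc2: %s\n", body)
}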
BUG FIXES:
* autopilot: update to also remove failed nodes from WAN gossip pool [[GH-6028](https://github.com/hashicorp/consul/pull/6028)]
* agent: avoid reverting any check updates that occur while a service is being added or the config is reloaded [[GH-6144](https://github.com/hashicorp/consul/issues/6144)]
* auto-encrypt: fix an issue that could cause cloud retry-join to fail when utilized with auto-encrypt by falling back to a default port [[GH-6205]](https://github.com/hashicorp/consul/pull/6205)
## 1.5.2 (June 27, 2019)
FEATURES:

View File

@ -48,6 +48,14 @@ UI_BUILD_TAG?=consul-build-ui
BUILD_CONTAINER_NAME?=consul-builder
CONSUL_IMAGE_VERSION?=latest
################
# CI Variables #
################
CI_DEV_DOCKER_NAMESPACE?=hashicorpdev
CI_DEV_DOCKER_IMAGE_NAME?=consul
CI_DEV_DOCKER_WORKDIR?=bin/
################
TEST_MODCACHE?=1
TEST_BUILDCACHE?=1
@ -156,6 +164,26 @@ dev-docker: linux
@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
@docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
# In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
# should only run in CI and not locally.
ci.dev-docker:
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building Consul Development container - $(CI_DEV_DOCKER_IMAGE_NAME)"
@docker build $(NOCACHE) $(QUIET) -t '$(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT)' \
--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
--label COMMIT_SHA=$(CIRCLE_SHA1) \
--label PULL_REQUEST=$(CIRCLE_PULL_REQUEST) \
--label CIRCLE_BUILD_URL=$(CIRCLE_BUILD_URL) \
$(CI_DEV_DOCKER_WORKDIR) -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
@echo $(DOCKER_PASS) | docker login -u="$(DOCKER_LOGIN)" --password-stdin
@echo "Pushing dev image to: https://cloud.docker.com/u/hashicorpdev/repository/docker/hashicorpdev/consul"
@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT)
ifeq ($(CIRCLE_BRANCH), master)
@docker tag $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):$(GIT_COMMIT) $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
endif
changelogfmt:
@echo "--> Making [GH-xxxx] references clickable..."
@sed -E 's|([^\[])\[GH-([0-9]+)\]|\1[[GH-\2](https://github.com/hashicorp/consul/issues/\2)]|g' CHANGELOG.md > changelog.tmp && mv changelog.tmp CHANGELOG.md

File diff suppressed because one or more lines are too long

View File

@ -840,6 +840,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
GRPCPort: grpcPort,
GRPCAddrs: grpcAddrs,
KeyFile: b.stringVal(c.KeyFile),
KVMaxValueSize: b.uint64Val(c.Limits.KVMaxValueSize),
LeaveDrainTime: b.durationVal("performance.leave_drain_time", c.Performance.LeaveDrainTime),
LeaveOnTerm: leaveOnTerm,
LogLevel: b.stringVal(c.LogLevel),
@ -1441,6 +1442,17 @@ func (b *Builder) intVal(v *int) int {
return b.intValWithDefault(v, 0)
}
func (b *Builder) uint64ValWithDefault(v *uint64, defaultVal uint64) uint64 {
if v == nil {
return defaultVal
}
return *v
}
func (b *Builder) uint64Val(v *uint64) uint64 {
return b.uint64ValWithDefault(v, 0)
}
func (b *Builder) portVal(name string, v *int) int {
if v == nil || *v <= 0 {
return -1

View File

@ -658,8 +658,9 @@ type UnixSocket struct {
}
type Limits struct {
RPCMaxBurst *int `json:"rpc_max_burst,omitempty" hcl:"rpc_max_burst" mapstructure:"rpc_max_burst"`
RPCRate *float64 `json:"rpc_rate,omitempty" hcl:"rpc_rate" mapstructure:"rpc_rate"`
RPCMaxBurst *int `json:"rpc_max_burst,omitempty" hcl:"rpc_max_burst" mapstructure:"rpc_max_burst"`
RPCRate *float64 `json:"rpc_rate,omitempty" hcl:"rpc_rate" mapstructure:"rpc_rate"`
KVMaxValueSize *uint64 `json:"kv_max_value_size,omitempty" hcl:"kv_max_value_size" mapstructure:"kv_max_value_size"`
}
type Segment struct {

View File

@ -7,6 +7,7 @@ import (
"github.com/hashicorp/consul/agent/checks"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/version"
"github.com/hashicorp/raft"
)
func DefaultRPCProtocol() (int, error) {
@ -102,6 +103,7 @@ func DefaultSource() Source {
limits = {
rpc_rate = -1
rpc_max_burst = 1000
kv_max_value_size = ` + strconv.FormatInt(raft.SuggestedMaxDataSize, 10) + `
}
performance = {
leave_drain_time = "5s"

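For reference, a minimal sketch of where the kv_max_value_size default above comes from, assuming the raft v1.1.x pinned by this change (where SuggestedMaxDataSize is 512 KiB):

package main

import (
	"fmt"
	"strconv"

	"github.com/hashicorp/raft"
)

func main() {
	// DefaultSource splices this constant into the default HCL, so an
	// unconfigured agent caps KV values at raft's suggested entry size.
	fmt.Println("kv_max_value_size = " + strconv.FormatInt(raft.SuggestedMaxDataSize, 10))
}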
View File

@ -830,6 +830,12 @@ type RuntimeConfig struct {
// hcl: key_file = string
KeyFile string
// KVMaxValueSize controls the max allowed value size. If not set, it defaults
// to raft's suggested max value size.
//
// hcl: limits { kv_max_value_size = uint64 }
KVMaxValueSize uint64
// LeaveDrainTime is used to wait after a server has left the LAN Serf
// pool for RPCs to drain and new requests to be sent to other servers.
//

View File

@ -3738,7 +3738,8 @@ func TestFullConfig(t *testing.T) {
"leave_on_terminate": true,
"limits": {
"rpc_rate": 12029.43,
"rpc_max_burst": 44848
"rpc_max_burst": 44848,
"kv_max_value_size": 1234567800000000
},
"log_level": "k1zo9Spt",
"node_id": "AsUIlw99",
@ -4344,6 +4345,7 @@ func TestFullConfig(t *testing.T) {
limits {
rpc_rate = 12029.43
rpc_max_burst = 44848
kv_max_value_size = 1234567800000000
}
log_level = "k1zo9Spt"
node_id = "AsUIlw99"
@ -5030,6 +5032,7 @@ func TestFullConfig(t *testing.T) {
HTTPSAddrs: []net.Addr{tcpAddr("95.17.17.19:15127")},
HTTPSPort: 15127,
KeyFile: "IEkkwgIA",
KVMaxValueSize: 1234567800000000,
LeaveDrainTime: 8265 * time.Second,
LeaveOnTerm: true,
LogLevel: "k1zo9Spt",
@ -5720,6 +5723,7 @@ func TestSanitize(t *testing.T) {
OutputMaxSize: checks.DefaultBufSize,
},
},
KVMaxValueSize: 1234567800000000,
}
rtJSON := `{
@ -5885,6 +5889,7 @@ func TestSanitize(t *testing.T) {
"HTTPSAddrs": [],
"HTTPSPort": 0,
"KeyFile": "hidden",
"KVMaxValueSize": 1234567800000000,
"LeaveDrainTime": "0s",
"LeaveOnTerm": false,
"LogLevel": "",

View File

@ -4,6 +4,7 @@ import (
"fmt"
"log"
"net"
"strconv"
"strings"
"time"
@ -18,7 +19,7 @@ const (
retryJitterWindow = 30 * time.Second
)
func (c *Client) RequestAutoEncryptCerts(servers []string, port int, token string, interruptCh chan struct{}) (*structs.SignedResponse, string, error) {
func (c *Client) RequestAutoEncryptCerts(servers []string, defaultPort int, token string, interruptCh chan struct{}) (*structs.SignedResponse, string, error) {
errFn := func(err error) (*structs.SignedResponse, string, error) {
return nil, "", err
}
@ -81,11 +82,12 @@ func (c *Client) RequestAutoEncryptCerts(servers []string, port int, token strin
// Translate host to net.TCPAddr to make life easier for
// RPCInsecure.
for _, s := range servers {
ips, err := resolveAddr(s, c.logger)
ips, port, err := resolveAddr(s, defaultPort, c.logger)
if err != nil {
c.logger.Printf("[WARN] agent: AutoEncrypt resolveAddr failed: %v", err)
continue
}
for _, ip := range ips {
addr := net.TCPAddr{IP: ip, Port: port}
@ -112,16 +114,29 @@ func (c *Client) RequestAutoEncryptCerts(servers []string, port int, token strin
}
}
// resolveAddr is used to resolve the address into an address,
// port, and error. If no port is given, use the default
func resolveAddr(rawHost string, logger *log.Logger) ([]net.IP, error) {
host, _, err := net.SplitHostPort(rawHost)
if err != nil && err.Error() != "missing port in address" {
return nil, err
// resolveAddr is used to resolve the host into IPs, port, and error.
// If no port is given, use the default
func resolveAddr(rawHost string, defaultPort int, logger *log.Logger) ([]net.IP, int, error) {
host, splitPort, err := net.SplitHostPort(rawHost)
if err != nil && err.Error() != fmt.Sprintf("address %s: missing port in address", rawHost) {
return nil, defaultPort, err
}
// SplitHostPort returns an empty host and splitPort on a missing-port error,
// so those are set to the defaults
var port int
if err != nil {
host = rawHost
port = defaultPort
} else {
port, err = strconv.Atoi(splitPort)
if err != nil {
port = defaultPort
}
}
if ip := net.ParseIP(host); ip != nil {
return []net.IP{ip}, nil
return []net.IP{ip}, port, nil
}
// First try TCP so we have the best chance for the largest list of
@ -130,13 +145,17 @@ func resolveAddr(rawHost string, logger *log.Logger) ([]net.IP, error) {
if ips, err := tcpLookupIP(host, logger); err != nil {
logger.Printf("[DEBUG] agent: TCP-first lookup failed for '%s', falling back to UDP: %s", host, err)
} else if len(ips) > 0 {
return ips, nil
return ips, port, nil
}
// If TCP didn't yield anything then use the normal Go resolver which
// will try UDP, then might possibly try TCP again if the UDP response
// indicates it was truncated.
return net.LookupIP(host)
ips, err := net.LookupIP(host)
if err != nil {
return nil, port, err
}
return ips, port, nil
}
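As an aside, a self-contained sketch of the net.SplitHostPort behavior the code above works around: the missing-port error message embeds the raw address, which is why resolveAddr matches against the formatted string rather than a sentinel error.

package main

import (
	"fmt"
	"net"
)

func main() {
	// With an explicit port, SplitHostPort succeeds.
	host, port, err := net.SplitHostPort("127.0.0.1:8300")
	fmt.Println(host, port, err) // 127.0.0.1 8300 <nil>

	// Without one it fails, and the error string includes the address:
	// "address 127.0.0.1: missing port in address".
	_, _, err = net.SplitHostPort("127.0.0.1")
	fmt.Println(err)
}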
// tcpLookupIP is a helper to initiate a TCP-based DNS lookup for the given host.

View File

@ -0,0 +1,80 @@
package consul
import (
"github.com/stretchr/testify/require"
"log"
"net"
"os"
"testing"
)
func TestAutoEncrypt_resolveAddr(t *testing.T) {
type args struct {
rawHost string
defaultPort int
logger *log.Logger
}
tests := []struct {
name string
args args
ips []net.IP
port int
wantErr bool
}{
{
name: "host without port",
args: args{
"127.0.0.1",
8300,
log.New(os.Stderr, "", log.LstdFlags),
},
ips: []net.IP{net.IPv4(127, 0, 0, 1)},
port: 8300,
wantErr: false,
},
{
name: "host with port",
args: args{
"127.0.0.1:1234",
8300,
log.New(os.Stderr, "", log.LstdFlags),
},
ips: []net.IP{net.IPv4(127, 0, 0, 1)},
port: 1234,
wantErr: false,
},
{
name: "host with broken port",
args: args{
"127.0.0.1:xyz",
8300,
log.New(os.Stderr, "", log.LstdFlags),
},
ips: []net.IP{net.IPv4(127, 0, 0, 1)},
port: 8300,
wantErr: false,
},
{
name: "not an address",
args: args{
"abc",
8300,
log.New(os.Stderr, "", log.LstdFlags),
},
ips: nil,
port: 8300,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ips, port, err := resolveAddr(tt.args.rawHost, tt.args.defaultPort, tt.args.logger)
if (err != nil) != tt.wantErr {
t.Errorf("resolveAddr error: %v, wantErr: %v", err, tt.wantErr)
return
}
require.Equal(t, tt.ips, ips)
require.Equal(t, tt.port, port)
})
}
}

View File

@ -1,6 +1,7 @@
package fsm
import (
"bytes"
"fmt"
"math/rand"
"os"
@ -8,12 +9,16 @@ import (
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/autopilot"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-raftchunking"
raftchunkingtypes "github.com/hashicorp/go-raftchunking/types"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/coordinate"
"github.com/mitchellh/mapstructure"
"github.com/pascaldekloe/goe/verify"
@ -1397,3 +1402,214 @@ func TestFSM_ConfigEntry(t *testing.T) {
require.Equal(entry, config)
}
}
// This adapts another test by chunking the encoded data and then performing
// out-of-order applies of half the logs. It then snapshots, restores to a new
// FSM, and applies the rest. The goal is to verify that chunking and
// snapshotting work as expected.
func TestFSM_Chunking_Lifecycle(t *testing.T) {
t.Parallel()
require := require.New(t)
assert := assert.New(t)
fsm, err := New(nil, os.Stderr)
require.NoError(err)
var logOfLogs [][]*raft.Log
var bufs [][]byte
for i := 0; i < 10; i++ {
req := structs.RegisterRequest{
Datacenter: "dc1",
Node: fmt.Sprintf("foo%d", i),
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"master"},
Port: 8000,
},
Check: &structs.HealthCheck{
Node: fmt.Sprintf("foo%d", i),
CheckID: "db",
Name: "db connectivity",
Status: api.HealthPassing,
ServiceID: "db",
},
}
buf, err := structs.Encode(structs.RegisterRequestType, req)
require.NoError(err)
var logs []*raft.Log
for j, b := range buf {
chunkInfo := &raftchunkingtypes.ChunkInfo{
OpNum: uint64(32 + i),
SequenceNum: uint32(j),
NumChunks: uint32(len(buf)),
}
chunkBytes, err := proto.Marshal(chunkInfo)
require.NoError(err)
logs = append(logs, &raft.Log{
Data: []byte{b},
Extensions: chunkBytes,
})
}
bufs = append(bufs, buf)
logOfLogs = append(logOfLogs, logs)
}
// The reason for the skipping is to test out-of-order applies which are
// theoretically possible. Apply some logs from each set of chunks, but not
// the full set, and out of order.
for _, logs := range logOfLogs {
resp := fsm.chunker.Apply(logs[8])
assert.Nil(resp)
resp = fsm.chunker.Apply(logs[0])
assert.Nil(resp)
resp = fsm.chunker.Apply(logs[3])
assert.Nil(resp)
}
// Verify we are not registered
for i := 0; i < 10; i++ {
_, node, err := fsm.state.GetNode(fmt.Sprintf("foo%d", i))
require.NoError(err)
assert.Nil(node)
}
// Snapshot, restore elsewhere, apply the rest of the logs, make sure it
// looks right
snap, err := fsm.Snapshot()
require.NoError(err)
defer snap.Release()
sinkBuf := bytes.NewBuffer(nil)
sink := &MockSink{sinkBuf, false}
err = snap.Persist(sink)
require.NoError(err)
fsm2, err := New(nil, os.Stderr)
require.NoError(err)
err = fsm2.Restore(sink)
require.NoError(err)
// Verify we are still not registered
for i := 0; i < 10; i++ {
_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i))
require.NoError(err)
assert.Nil(node)
}
// Apply the rest of the logs
for _, logs := range logOfLogs {
var resp interface{}
for i, log := range logs {
switch i {
case 0, 3, 8:
default:
resp = fsm2.chunker.Apply(log)
if i != len(logs)-1 {
assert.Nil(resp)
}
}
}
_, ok := resp.(raftchunking.ChunkingSuccess)
assert.True(ok)
}
// Verify we are registered
for i := 0; i < 10; i++ {
_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i))
require.NoError(err)
assert.NotNil(node)
// Verify service registered
_, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i))
require.NoError(err)
_, ok := services.Services["db"]
assert.True(ok)
// Verify check
_, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i))
require.NoError(err)
require.Equal(string(checks[0].CheckID), "db")
}
}
func TestFSM_Chunking_TermChange(t *testing.T) {
t.Parallel()
assert := assert.New(t)
require := require.New(t)
fsm, err := New(nil, os.Stderr)
require.NoError(err)
req := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "db",
Service: "db",
Tags: []string{"master"},
Port: 8000,
},
Check: &structs.HealthCheck{
Node: "foo",
CheckID: "db",
Name: "db connectivity",
Status: api.HealthPassing,
ServiceID: "db",
},
}
buf, err := structs.Encode(structs.RegisterRequestType, req)
require.NoError(err)
// Only need two chunks to test this
chunks := [][]byte{
buf[0:2],
buf[2:],
}
var logs []*raft.Log
for i, b := range chunks {
chunkInfo := &raftchunkingtypes.ChunkInfo{
OpNum: uint64(32),
SequenceNum: uint32(i),
NumChunks: uint32(len(chunks)),
}
chunkBytes, err := proto.Marshal(chunkInfo)
if err != nil {
t.Fatal(err)
}
logs = append(logs, &raft.Log{
Term: uint64(i),
Data: b,
Extensions: chunkBytes,
})
}
// We should see nil for both
for _, log := range logs {
resp := fsm.chunker.Apply(log)
assert.Nil(resp)
}
// Now verify the other baseline, that when the term doesn't change we see
// non-nil. First make the chunker have a clean state, then set the terms
// to be the same.
fsm.chunker.RestoreState(nil)
logs[1].Term = uint64(0)
// We should see nil only for the first one
for i, log := range logs {
resp := fsm.chunker.Apply(log)
if i == 0 {
assert.Nil(resp)
}
if i == 1 {
assert.NotNil(resp)
}
}
}

View File

@ -10,6 +10,7 @@ import (
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/go-msgpack/codec"
"github.com/hashicorp/go-raftchunking"
"github.com/hashicorp/raft"
)
@ -60,6 +61,8 @@ type FSM struct {
state *state.Store
gc *state.TombstoneGC
chunker *raftchunking.ChunkingFSM
}
// New is used to construct a new FSM with a blank state.
@ -85,9 +88,15 @@ func New(gc *state.TombstoneGC, logOutput io.Writer) (*FSM, error) {
}
}
fsm.chunker = raftchunking.NewChunkingFSM(fsm, nil)
return fsm, nil
}
func (c *FSM) ChunkingFSM() *raftchunking.ChunkingFSM {
return c.chunker
}
// State is used to return a handle to the current state
func (c *FSM) State() *state.Store {
c.stateLock.RLock()
@ -127,7 +136,15 @@ func (c *FSM) Snapshot() (raft.FSMSnapshot, error) {
c.logger.Printf("[INFO] consul.fsm: snapshot created in %v", time.Since(start))
}(time.Now())
return &snapshot{c.state.Snapshot()}, nil
chunkState, err := c.chunker.CurrentState()
if err != nil {
return nil, err
}
return &snapshot{
state: c.state.Snapshot(),
chunkState: chunkState,
}, nil
}
// Restore streams in the snapshot and replaces the current state store with a
@ -167,11 +184,23 @@ func (c *FSM) Restore(old io.ReadCloser) error {
// Decode
msg := structs.MessageType(msgType[0])
if fn := restorers[msg]; fn != nil {
switch {
case msg == structs.ChunkingStateType:
chunkState := &raftchunking.State{
ChunkMap: make(raftchunking.ChunkMap),
}
if err := dec.Decode(chunkState); err != nil {
return err
}
if err := c.chunker.RestoreState(chunkState); err != nil {
return err
}
case restorers[msg] != nil:
fn := restorers[msg]
if err := fn(&header, restore, dec); err != nil {
return err
}
} else {
default:
return fmt.Errorf("Unrecognized msg type %d", msg)
}
}

View File

@ -8,6 +8,7 @@ import (
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/go-msgpack/codec"
"github.com/hashicorp/go-raftchunking"
"github.com/hashicorp/raft"
)
@ -15,7 +16,8 @@ import (
// state in a way that can be accessed concurrently with operations
// that may modify the live state.
type snapshot struct {
state *state.Snapshot
state *state.Snapshot
chunkState *raftchunking.State
}
// snapshotHeader is the first entry in our snapshot
@ -76,6 +78,18 @@ func (s *snapshot) Persist(sink raft.SnapshotSink) error {
return err
}
}
// Persist chunking state
if s.chunkState == nil {
return nil
}
if _, err := sink.Write([]byte{byte(structs.ChunkingStateType)}); err != nil {
return err
}
if err := encoder.Encode(s.chunkState); err != nil {
return err
}
return nil
}

View File

@ -14,6 +14,7 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/go-raftchunking"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -249,6 +250,36 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
require.NoError(fsm.state.EnsureConfigEntry(18, serviceConfig))
require.NoError(fsm.state.EnsureConfigEntry(19, proxyConfig))
chunkState := &raftchunking.State{
ChunkMap: make(raftchunking.ChunkMap),
}
chunkState.ChunkMap[0] = []*raftchunking.ChunkInfo{
{
OpNum: 0,
SequenceNum: 0,
NumChunks: 3,
Data: []byte("foo"),
},
nil,
{
OpNum: 0,
SequenceNum: 2,
NumChunks: 3,
Data: []byte("bar"),
},
}
chunkState.ChunkMap[20] = []*raftchunking.ChunkInfo{
nil,
{
OpNum: 20,
SequenceNum: 1,
NumChunks: 2,
Data: []byte("bar"),
},
}
err = fsm.chunker.RestoreState(chunkState)
require.NoError(err)
// Snapshot
snap, err := fsm.Snapshot()
if err != nil {
@ -463,6 +494,10 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
require.NoError(err)
assert.Equal(proxyConfig, proxyConfEntry)
newChunkState, err := fsm2.chunker.CurrentState()
require.NoError(err)
assert.Equal(newChunkState, chunkState)
// Snapshot
snap, err = fsm2.Snapshot()
if err != nil {

View File

@ -2,6 +2,7 @@ package consul
import (
"crypto/tls"
"errors"
"fmt"
"io"
"net"
@ -15,8 +16,10 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-raftchunking"
"github.com/hashicorp/memberlist"
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/raft"
"github.com/hashicorp/yamux"
)
@ -45,6 +48,10 @@ const (
enqueueLimit = 30 * time.Second
)
var (
ErrChunkingResubmit = errors.New("please resubmit call for rechunking")
)
// listen is used to listen for incoming RPC connections
func (s *Server) listen(listener net.Listener) {
for {
@ -203,6 +210,12 @@ func canRetry(args interface{}, err error) bool {
return true
}
// If we are chunking and it doesn't seem to have completed, try again
intErr, ok := args.(error)
if ok && strings.Contains(intErr.Error(), ErrChunkingResubmit.Error()) {
return true
}
// Reads are safe to retry for stream errors, such as if a server was
// being shut down.
info, ok := args.(structs.RPCInfo)
@ -366,12 +379,43 @@ func (s *Server) raftApply(t structs.MessageType, msg interface{}) (interface{},
s.logger.Printf("[WARN] consul: Attempting to apply large raft entry (%d bytes)", n)
}
future := s.raft.Apply(buf, enqueueLimit)
var chunked bool
var future raft.ApplyFuture
switch {
case len(buf) <= raft.SuggestedMaxDataSize || t != structs.KVSRequestType:
future = s.raft.Apply(buf, enqueueLimit)
default:
chunked = true
future = raftchunking.ChunkingApply(buf, nil, enqueueLimit, s.raft.ApplyLog)
}
if err := future.Error(); err != nil {
return nil, err
}
return future.Response(), nil
resp := future.Response()
if chunked {
// In this case we didn't apply all chunks successfully, possibly due
// to a term change; resubmit
if resp == nil {
// This returns the error in the interface because the raft library
// returns errors from the FSM via the future, not via err from the
// apply function. Downstream client code expects to see any error
// from the FSM (as opposed to the apply itself) and decide whether
// it can retry in the future's response.
return ErrChunkingResubmit, nil
}
// We expect that this conversion should always work
chunkedSuccess, ok := resp.(raftchunking.ChunkingSuccess)
if !ok {
return nil, errors.New("unknown type of response back from chunking FSM")
}
// Return the inner wrapped response
return chunkedSuccess.Response, nil
}
return resp, nil
}
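A condensed toy of the dispatch in raftApply above; shouldChunk and its hard-coded constant are invented for illustration (raft.SuggestedMaxDataSize is 512 KiB in the raft v1.1.x pinned by this change):

package main

import "fmt"

const suggestedMaxDataSize = 512 * 1024 // mirrors raft.SuggestedMaxDataSize in v1.1.x

// shouldChunk mirrors raftApply's decision: only oversized KV writes take
// the chunked path; every other message goes through the plain raft Apply.
func shouldChunk(msgLen int, isKVSRequest bool) bool {
	return msgLen > suggestedMaxDataSize && isKVSRequest
}

func main() {
	fmt.Println(shouldChunk(1024, true))    // false: under the threshold
	fmt.Println(shouldChunk(10<<20, false)) // false: not a KV request
	fmt.Println(shouldChunk(10<<20, true))  // true: takes the chunked path
}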
// queryFn is used to perform a query operation. If a re-query is needed, the

View File

@ -734,7 +734,7 @@ func (s *Server) setupRaft() error {
s.raftNotifyCh = raftNotifyCh
// Setup the Raft store.
s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable, snap, trans)
s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm.ChunkingFSM(), log, stable, snap, trans)
if err != nil {
return err
}

View File

@ -287,7 +287,7 @@ func (s *Server) maybeBootstrap() {
// Retry with exponential backoff to get peer status from this server
for attempt := uint(0); attempt < maxPeerRetries; attempt++ {
if err := s.connPool.RPC(s.config.Datacenter, server.Addr, server.Version,
"Status.Peers", server.UseTLS, &struct{}{}, &peers); err != nil {
"Status.Peers", server.UseTLS, &structs.DCSpecificRequest{Datacenter: s.config.Datacenter}, &peers); err != nil {
nextRetry := time.Duration((1 << attempt) * peerRetryBase)
s.logger.Printf("[ERR] consul: Failed to confirm peer status for %s: %v. Retrying in "+
"%v...", server.Name, err, nextRetry.String())

View File

@ -154,16 +154,18 @@ func testServerDCExpectNonVoter(t *testing.T, dc string, expect int) (string, *S
}
func testServerWithConfig(t *testing.T, cb func(*Config)) (string, *Server) {
dir, config := testServerConfig(t)
if cb != nil {
cb(config)
}
var dir string
var config *Config
var srv *Server
var err error
// Retry added to avoid cases where bind addr is already in use
retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
dir, config = testServerConfig(t)
if cb != nil {
cb(config)
}
srv, err = newServer(config)
if err != nil {
os.RemoveAll(dir)
@ -183,10 +185,6 @@ func newServer(c *Config) (*Server, error) {
oldNotify()
}
}
// Restore old notify to guard against re-closing `up` on a retry
defer func() {
c.NotifyListen = oldNotify
}()
// start server
w := c.LogOutput

View File

@ -5,6 +5,7 @@ import (
"strconv"
"github.com/hashicorp/consul/agent/consul/autopilot"
"github.com/hashicorp/consul/agent/structs"
)
// Status endpoint is used to check on server status
@ -18,7 +19,14 @@ func (s *Status) Ping(args struct{}, reply *struct{}) error {
}
// Leader is used to get the address of the leader
func (s *Status) Leader(args struct{}, reply *string) error {
func (s *Status) Leader(args *structs.DCSpecificRequest, reply *string) error {
// not using the regular forward function as it does a bunch of things we
// don't want, like verifying consistency. We just want to enable DC
// forwarding
if args.Datacenter != "" && args.Datacenter != s.server.config.Datacenter {
return s.server.forwardDC("Status.Leader", args.Datacenter, args, reply)
}
leader := string(s.server.raft.Leader())
if leader != "" {
*reply = leader
@ -29,7 +37,14 @@ func (s *Status) Leader(args struct{}, reply *string) error {
}
// Peers is used to get all the Raft peers
func (s *Status) Peers(args struct{}, reply *[]string) error {
func (s *Status) Peers(args *structs.DCSpecificRequest, reply *[]string) error {
// not using the regular forward function as it does a bunch of things we
// don't want, like verifying consistency. We just want to enable DC
// forwarding
if args.Datacenter != "" && args.Datacenter != s.server.config.Datacenter {
return s.server.forwardDC("Status.Peers", args.Datacenter, args, reply)
}
future := s.server.raft.GetConfiguration()
if err := future.Error(); err != nil {
return err

View File

@ -8,9 +8,11 @@ import (
"time"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/stretchr/testify/require"
)
func rpcClient(t *testing.T, s *Server) rpc.ClientCodec {
@ -69,6 +71,32 @@ func TestStatusLeader(t *testing.T) {
}
}
func TestStatusLeader_ForwardDC(t *testing.T) {
t.Parallel()
dir1, s1 := testServerDC(t, "primary")
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
dir2, s2 := testServerDC(t, "secondary")
defer os.RemoveAll(dir2)
defer s2.Shutdown()
joinWAN(t, s2, s1)
testrpc.WaitForLeader(t, s1.RPC, "secondary")
testrpc.WaitForLeader(t, s2.RPC, "primary")
args := structs.DCSpecificRequest{
Datacenter: "secondary",
}
var out string
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Status.Leader", &args, &out))
require.Equal(t, s2.config.RPCAdvertise.String(), out)
}
func TestStatusPeers(t *testing.T) {
t.Parallel()
dir1, s1 := testServer(t)
@ -86,3 +114,29 @@ func TestStatusPeers(t *testing.T) {
t.Fatalf("no peers: %v", peers)
}
}
func TestStatusPeers_ForwardDC(t *testing.T) {
t.Parallel()
dir1, s1 := testServerDC(t, "primary")
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
dir2, s2 := testServerDC(t, "secondary")
defer os.RemoveAll(dir2)
defer s2.Shutdown()
joinWAN(t, s2, s1)
testrpc.WaitForLeader(t, s1.RPC, "secondary")
testrpc.WaitForLeader(t, s2.RPC, "primary")
args := structs.DCSpecificRequest{
Datacenter: "secondary",
}
var out []string
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Status.Peers", &args, &out))
require.Equal(t, []string{s2.config.RPCAdvertise.String()}, out)
}

View File

@ -12,13 +12,6 @@ import (
"github.com/hashicorp/consul/api"
)
const (
// maxKVSize is used to limit the maximum payload length
// of a KV entry. If it exceeds this amount, the client is
// likely abusing the KV store.
maxKVSize = 512 * 1024
)
func (s *HTTPServer) KVSEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Set default DC
args := structs.KeyRequest{}
@ -182,9 +175,9 @@ func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *s
}
// Check the content-length
if req.ContentLength > maxKVSize {
if req.ContentLength > int64(s.agent.config.KVMaxValueSize) {
resp.WriteHeader(http.StatusRequestEntityTooLarge)
fmt.Fprintf(resp, "Value exceeds %d byte limit", maxKVSize)
fmt.Fprintf(resp, "Value exceeds %d byte limit", s.agent.config.KVMaxValueSize)
return nil, nil
}

View File

@ -2,19 +2,31 @@ package agent
import (
"net/http"
"github.com/hashicorp/consul/agent/structs"
)
func (s *HTTPServer) StatusLeader(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
args := structs.DCSpecificRequest{}
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
var out string
if err := s.agent.RPC("Status.Leader", struct{}{}, &out); err != nil {
if err := s.agent.RPC("Status.Leader", &args, &out); err != nil {
return nil, err
}
return out, nil
}
func (s *HTTPServer) StatusPeers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
args := structs.DCSpecificRequest{}
if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
return nil, nil
}
var out []string
if err := s.agent.RPC("Status.Peers", struct{}{}, &out); err != nil {
if err := s.agent.RPC("Status.Peers", &args, &out); err != nil {
return nil, err
}
return out, nil

View File

@ -1,10 +1,13 @@
package agent
import (
"fmt"
"net/http"
"testing"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/stretchr/testify/require"
)
func TestStatusLeader(t *testing.T) {
@ -24,6 +27,42 @@ func TestStatusLeader(t *testing.T) {
}
}
func TestStatusLeaderSecondary(t *testing.T) {
t.Parallel()
a1 := NewTestAgent(t, t.Name(), "datacenter = \"primary\"")
defer a1.Shutdown()
a2 := NewTestAgent(t, t.Name(), "datacenter = \"secondary\"")
defer a2.Shutdown()
testrpc.WaitForTestAgent(t, a1.RPC, "primary")
testrpc.WaitForTestAgent(t, a2.RPC, "secondary")
a1SerfAddr := fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortWAN)
a1Addr := fmt.Sprintf("127.0.0.1:%d", a1.Config.ServerPort)
a2Addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.ServerPort)
_, err := a2.JoinWAN([]string{a1SerfAddr})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
require.Len(r, a1.WANMembers(), 2)
require.Len(r, a2.WANMembers(), 2)
})
req, _ := http.NewRequest("GET", "/v1/status/leader?dc=secondary", nil)
obj, err := a1.srv.StatusLeader(nil, req)
require.NoError(t, err)
leader, ok := obj.(string)
require.True(t, ok)
require.Equal(t, a2Addr, leader)
req, _ = http.NewRequest("GET", "/v1/status/leader?dc=primary", nil)
obj, err = a2.srv.StatusLeader(nil, req)
require.NoError(t, err)
leader, ok = obj.(string)
require.True(t, ok)
require.Equal(t, a1Addr, leader)
}
func TestStatusPeers(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), "")
@ -40,3 +79,39 @@ func TestStatusPeers(t *testing.T) {
t.Fatalf("bad peers: %v", peers)
}
}
func TestStatusPeersSecondary(t *testing.T) {
t.Parallel()
a1 := NewTestAgent(t, t.Name(), "datacenter = \"primary\"")
defer a1.Shutdown()
a2 := NewTestAgent(t, t.Name(), "datacenter = \"secondary\"")
defer a2.Shutdown()
testrpc.WaitForTestAgent(t, a1.RPC, "primary")
testrpc.WaitForTestAgent(t, a2.RPC, "secondary")
a1SerfAddr := fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortWAN)
a1Addr := fmt.Sprintf("127.0.0.1:%d", a1.Config.ServerPort)
a2Addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.ServerPort)
_, err := a2.JoinWAN([]string{a1SerfAddr})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
require.Len(r, a1.WANMembers(), 2)
require.Len(r, a2.WANMembers(), 2)
})
req, _ := http.NewRequest("GET", "/v1/status/peers?dc=secondary", nil)
obj, err := a1.srv.StatusPeers(nil, req)
require.NoError(t, err)
peers, ok := obj.([]string)
require.True(t, ok)
require.Equal(t, []string{a2Addr}, peers)
req, _ = http.NewRequest("GET", "/v1/status/peers?dc=primary", nil)
obj, err = a2.srv.StatusPeers(nil, req)
require.NoError(t, err)
peers, ok = obj.([]string)
require.True(t, ok)
require.Equal(t, []string{a1Addr}, peers)
}

View File

@ -63,6 +63,7 @@ const (
ACLBindingRuleDeleteRequestType = 26
ACLAuthMethodSetRequestType = 27
ACLAuthMethodDeleteRequestType = 28
ChunkingStateType = 29
)
const (

View File

@ -123,17 +123,17 @@ func (s *HTTPServer) convertOps(resp http.ResponseWriter, req *http.Request) (st
// byte arrays so we can assign right over.
var opsRPC structs.TxnOps
var writes int
var netKVSize int
var netKVSize uint64
for _, in := range ops {
switch {
case in.KV != nil:
size := len(in.KV.Value)
if size > maxKVSize {
if uint64(size) > s.agent.config.KVMaxValueSize {
resp.WriteHeader(http.StatusRequestEntityTooLarge)
fmt.Fprintf(resp, "Value for key %q is too large (%d > %d bytes)", in.KV.Key, size, maxKVSize)
fmt.Fprintf(resp, "Value for key %q is too large (%d > %d bytes)", in.KV.Key, size, s.agent.config.KVMaxValueSize)
return nil, 0, false
}
netKVSize += size
netKVSize += uint64(size)
verb := in.KV.Verb
if isWrite(verb) {
@ -273,10 +273,10 @@ func (s *HTTPServer) convertOps(resp http.ResponseWriter, req *http.Request) (st
}
// Enforce an overall size limit to help prevent abuse.
if netKVSize > maxKVSize {
if netKVSize > s.agent.config.KVMaxValueSize {
resp.WriteHeader(http.StatusRequestEntityTooLarge)
fmt.Fprintf(resp, "Cumulative size of key data is too large (%d > %d bytes)",
netKVSize, maxKVSize)
netKVSize, s.agent.config.KVMaxValueSize)
return nil, 0, false
}

View File

@ -2,6 +2,7 @@ package agent
import (
"bytes"
"encoding/base64"
"fmt"
"net/http"
"net/http/httptest"
@ -12,6 +13,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/raft"
"github.com/pascaldekloe/goe/verify"
"github.com/hashicorp/consul/agent/structs"
@ -38,10 +40,10 @@ func TestTxnEndpoint_Bad_JSON(t *testing.T) {
func TestTxnEndpoint_Bad_Size_Item(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), "")
defer a.Shutdown()
buf := bytes.NewBuffer([]byte(fmt.Sprintf(`
testIt := func(agent *TestAgent, wantPass bool) {
value := strings.Repeat("X", 3*raft.SuggestedMaxDataSize)
value = base64.StdEncoding.EncodeToString([]byte(value))
buf := bytes.NewBuffer([]byte(fmt.Sprintf(`
[
{
"KV": {
@ -51,24 +53,40 @@ func TestTxnEndpoint_Bad_Size_Item(t *testing.T) {
}
}
]
`, strings.Repeat("bad", 2*maxKVSize))))
req, _ := http.NewRequest("PUT", "/v1/txn", buf)
resp := httptest.NewRecorder()
if _, err := a.srv.Txn(resp, req); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Code != 413 {
t.Fatalf("expected 413, got %d", resp.Code)
`, value)))
req, _ := http.NewRequest("PUT", "/v1/txn", buf)
resp := httptest.NewRecorder()
if _, err := agent.srv.Txn(resp, req); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Code != 413 && !wantPass {
t.Fatalf("expected 413, got %d", resp.Code)
}
if resp.Code != 200 && wantPass {
t.Fatalf("expected 200, got %d", resp.Code)
}
}
t.Run("toobig", func(t *testing.T) {
a := NewTestAgent(t, t.Name(), "")
testIt(a, false)
a.Shutdown()
})
t.Run("allowed", func(t *testing.T) {
a := NewTestAgent(t, t.Name(), "limits = { kv_max_value_size = 123456789 }")
testIt(a, true)
a.Shutdown()
})
}
func TestTxnEndpoint_Bad_Size_Net(t *testing.T) {
t.Parallel()
a := NewTestAgent(t, t.Name(), "")
defer a.Shutdown()
value := strings.Repeat("X", maxKVSize/2)
buf := bytes.NewBuffer([]byte(fmt.Sprintf(`
testIt := func(agent *TestAgent, wantPass bool) {
value := strings.Repeat("X", 3*raft.SuggestedMaxDataSize)
value = base64.StdEncoding.EncodeToString([]byte(value))
buf := bytes.NewBuffer([]byte(fmt.Sprintf(`
[
{
"KV": {
@ -93,14 +111,30 @@ func TestTxnEndpoint_Bad_Size_Net(t *testing.T) {
}
]
`, value, value, value)))
req, _ := http.NewRequest("PUT", "/v1/txn", buf)
resp := httptest.NewRecorder()
if _, err := a.srv.Txn(resp, req); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Code != 413 {
t.Fatalf("expected 413, got %d", resp.Code)
req, _ := http.NewRequest("PUT", "/v1/txn", buf)
resp := httptest.NewRecorder()
if _, err := agent.srv.Txn(resp, req); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Code != 413 && !wantPass {
t.Fatalf("expected 413, got %d", resp.Code)
}
if resp.Code != 200 && wantPass {
t.Fatalf("expected 200, got %d", resp.Code)
}
}
t.Run("toobig", func(t *testing.T) {
a := NewTestAgent(t, t.Name(), "")
testIt(a, false)
a.Shutdown()
})
t.Run("allowed", func(t *testing.T) {
a := NewTestAgent(t, t.Name(), "limits = { kv_max_value_size = 123456789 }")
testIt(a, true)
a.Shutdown()
})
}
func TestTxnEndpoint_Bad_Size_Ops(t *testing.T) {

go.mod
View File

@ -22,7 +22,6 @@ require (
github.com/asaskevich/govalidator v0.0.0-20180319081651-7d2e70ef918f // indirect
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect
github.com/coredns/coredns v1.1.2
@ -37,11 +36,10 @@ require (
github.com/go-ldap/ldap v3.0.2+incompatible // indirect
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-sql-driver/mysql v0.0.0-20180618115901-749ddf1598b4 // indirect
github.com/go-test/deep v1.0.1 // indirect
github.com/gocql/gocql v0.0.0-20180617115710-e06f8c1bcd78 // indirect
github.com/gogo/googleapis v1.1.0
github.com/gogo/protobuf v1.2.1
github.com/golang/protobuf v1.2.0
github.com/golang/protobuf v1.3.1
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
github.com/google/go-cmp v0.2.0 // indirect
github.com/google/go-github v17.0.0+incompatible // indirect
@ -58,6 +56,7 @@ require (
github.com/hashicorp/go-msgpack v0.5.5
github.com/hashicorp/go-multierror v1.0.0
github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116
github.com/hashicorp/go-raftchunking v0.6.1
github.com/hashicorp/go-sockaddr v1.0.0
github.com/hashicorp/go-syslog v1.0.0
github.com/hashicorp/go-uuid v1.0.1
@ -69,8 +68,8 @@ require (
github.com/hashicorp/mdns v1.0.1 // indirect
github.com/hashicorp/memberlist v0.1.4
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69
github.com/hashicorp/raft v1.1.0
github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1
github.com/hashicorp/raft v1.1.1
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea
github.com/hashicorp/serf v0.8.2
github.com/hashicorp/vault v0.10.3
github.com/hashicorp/vault-plugin-secrets-kv v0.0.0-20190318174639-195e0e9d07f1 // indirect
@ -78,12 +77,11 @@ require (
github.com/imdario/mergo v0.3.6
github.com/jefferai/jsonx v0.0.0-20160721235117-9cc31c3135ee // indirect
github.com/keybase/go-crypto v0.0.0-20180614160407-5114a9a81e1b // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/kr/text v0.1.0
github.com/lib/pq v0.0.0-20180523175426-90697d60dd84 // indirect
github.com/miekg/dns v1.0.14
github.com/mitchellh/cli v1.0.0
github.com/mitchellh/copystructure v0.0.0-20160804032330-cdac8253d00f
github.com/mitchellh/copystructure v1.0.0
github.com/mitchellh/go-testing-interface v1.0.0
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452
github.com/mitchellh/mapstructure v1.1.2
@ -108,7 +106,7 @@ require (
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
google.golang.org/grpc v1.19.1
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect

go.sum
View File

@ -100,8 +100,8 @@ github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-sql-driver/mysql v0.0.0-20180618115901-749ddf1598b4 h1:1LlmVz15APoKz9dnm5j2ePptburJlwEH+/v/pUuoxck=
github.com/go-sql-driver/mysql v0.0.0-20180618115901-749ddf1598b4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/gocql/gocql v0.0.0-20180617115710-e06f8c1bcd78 h1:G7iRamCffNivybfZvsJjtk3k2qHa73xW+OysVkukcGk=
github.com/gocql/gocql v0.0.0-20180617115710-e06f8c1bcd78/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
github.com/gogo/googleapis v1.1.0 h1:kFkMAZBNAn4j7K0GiZr8cRYzejq68VbheufiV3YuyFI=
@ -116,6 +116,8 @@ github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@ -167,6 +169,8 @@ github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uP
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116 h1:Y4V/yReWjQo/Ngyc0w6C3EKXKincp4YgvXeo8lI4LrI=
github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ=
github.com/hashicorp/go-raftchunking v0.6.1 h1:moEnaG3gcwsWNyIBJoD5PCByE+Ewkqxh6N05CT+MbwA=
github.com/hashicorp/go-raftchunking v0.6.1/go.mod h1:cGlg3JtDy7qy6c/3Bu660Mic1JF+7lWqIwCFSb08fX0=
github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
@ -201,10 +205,10 @@ github.com/hashicorp/memberlist v0.1.4 h1:gkyML/r71w3FL8gUi74Vk76avkj/9lYAY9lvg0
github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE=
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q=
github.com/hashicorp/raft v1.1.0 h1:qPMePEczgbkiQsqCsRfuHRqvDUO+zmAInDaD5ptXlq0=
github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1 h1:LHTrLUnNkk+2YkO5EMG49q0lHdR9AZhDbCpu0+M3e0E=
github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs=
github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/vault v0.10.3 h1:3Hf6mwC4rggOq6ViWSoJ2yfk1oBS5ed58LLcP33gmEg=
@ -253,8 +257,8 @@ github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v0.0.0-20160804032330-cdac8253d00f h1:UG6JHofOxDd6ldL+TCAM+z0LXBB1cbXny7qG4G/FjF0=
github.com/mitchellh/copystructure v0.0.0-20160804032330-cdac8253d00f/go.mod h1:eOsF2yLPlBBJPvD+nhl5QMTBSOBbOph6N7j/IDUw7PY=
github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
@ -266,6 +270,7 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@ -383,6 +388,8 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c h1:hDn6jm7snBX2O7+EeTk6Q4WXJfKt7MWgtiCCRi1rBoY=
golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=

View File

@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow

vendor/github.com/golang/protobuf/proto/deprecated.go
View File

@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2018 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import "errors"
// Deprecated: do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
// Deprecated: do not use.
func GetStats() Stats { return Stats{} }
// Deprecated: do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
func RegisterMessageSetType(Message, int32, string) {}

View File

@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
return false
}
m1, m2 := e1.value, e2.value
m1 := extensionAsLegacyType(e1.value)
m2 := extensionAsLegacyType(e2.value)
if m1 == nil && m2 == nil {
// Both have only encoded form.

View File

@ -185,9 +185,25 @@ type Extension struct {
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
desc *ExtensionDesc
// value is a concrete value for the extension field. Let the type of
// desc.ExtensionType be the "API type" and the type of Extension.value
// be the "storage type". The API type and storage type are the same except:
// * For scalars (except []byte), the API type uses *T,
// while the storage type uses T.
// * For repeated fields, the API type uses []T, while the storage type
// uses *[]T.
//
// The reason for the divergence is so that the storage type more naturally
// matches what is expected of when retrieving the values through the
// protobuf reflection APIs.
//
// The value may only be populated if desc is also populated.
value interface{}
enc []byte
// enc is the raw bytes for the extension field.
enc []byte
}
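A toy illustration of the API-type/storage-type split described in the comment above, using the same reflection trick as extensionAsLegacyType below; the variable names are invented for the example:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Storage type for a scalar extension: the bare value.
	stored := int32(42)

	// API type: a pointer to a copy of the value, which is what
	// GetExtension hands back for scalar fields.
	rv := reflect.New(reflect.TypeOf(stored))
	rv.Elem().Set(reflect.ValueOf(stored))
	api := rv.Interface()

	fmt.Printf("storage %T, api %T, value %d\n", stored, api, *api.(*int32))
}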
// SetRawExtension is for testing only.
@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
return extensionAsLegacyType(e.value), nil
}
if extension.ExtensionType == nil {
@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.value = extensionAsStorageType(v)
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
return extensionAsLegacyType(e.value), nil
}
// defaultExtensionValue returns the default value for extension.
@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: value}
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
return nil
}
@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
// extensionAsLegacyType converts a value from the storage type to the API type.
// See Extension.value.
func extensionAsLegacyType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
// Represent primitive types as a pointer to the value.
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
case reflect.Ptr:
// Represent slice types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Slice:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
}
}
}
return v
}
// extensionAsStorageType converts a value from the API type to the storage type.
// See Extension.value.
func extensionAsStorageType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Ptr:
// Represent pointers to primitive types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
}
}
case reflect.Slice:
// Represent slice types as a pointer to the value.
if rv.Type().Elem().Kind() != reflect.Uint8 {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
}
}
return v
}
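Read together, the two helpers round-trip a value between the representations. A sketch of the invariant, which would have to live inside the proto package since both helpers are unexported (assumes fmt is imported):

func ExampleExtensionTypeRoundTrip() {
	api := Int32(7)                       // *int32, the API type
	stored := extensionAsStorageType(api) // int32, the storage type
	back := extensionAsLegacyType(stored) // *int32 again, the API type
	fmt.Printf("%T %T %T\n", api, stored, back)
	// Output: *int32 int32 *int32
}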

View File

@ -341,26 +341,6 @@ type Message interface {
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
return false
}
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that the code is compatible with this version of the proto package.
const ProtoPackageIsVersion2 = true
const (
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
// to assert that the code is compatible with this version of the proto package.
ProtoPackageIsVersion3 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that the code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that the code is compatible with this version of the proto package.
ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that the code is compatible with this version of the proto package.
ProtoPackageIsVersion1 = true
)
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.

View File

@ -36,13 +36,7 @@ package proto
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
"sync"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
return buf[i+1:]
}
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
return marshalMessageSet(exts, false)
}
// marshalMessageSet implements the above function, with the option to turn deterministic marshaling on or off.
func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
switch exts := exts.(type) {
case *XXX_InternalExtensions:
var u marshalInfo
siz := u.sizeMessageSet(exts)
b := make([]byte, 0, siz)
return u.appendMessageSet(b, exts, deterministic)
case map[int32]Extension:
// This is an old-style extension map.
// Wrap it in a new-style XXX_InternalExtensions.
ie := XXX_InternalExtensions{
p: &struct {
mu sync.Mutex
extensionMap map[int32]Extension
}{
extensionMap: exts,
},
}
var u marshalInfo
siz := u.sizeMessageSet(&ie)
b := make([]byte, 0, siz)
return u.appendMessageSet(b, &ie, deterministic)
default:
return nil, errors.New("proto: not an extension map")
}
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
var mu sync.Locker
m, mu = exts.extensionsRead()
if m != nil {
// Keep the extensions map locked until we're done marshaling to prevent
// races between marshaling and unmarshaling the lazily-{en,de}coded
// values.
mu.Lock()
defer mu.Unlock()
}
case map[int32]Extension:
m = exts
default:
return nil, errors.New("proto: not an extension map")
}
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
if i > 0 && b.Len() > 1 {
b.WriteByte(',')
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}

View File

@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr bool) pointer {
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
v := reflect.ValueOf(*i)
u := reflect.New(v.Type())
u.Elem().Set(v)
if deref {
u = u.Elem()
}
return pointer{v: u}
}

View File

@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr bool) pointer {
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
// Super-tricky - read or get the address of data word of interface value.
if isptr {
// The interface is of pointer type, thus it is a direct interface.
// The data word is the pointer data itself. We take its address.
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
} else {
// The interface is not of pointer type. The data word is the pointer
// to the data.
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
// The interface is not of pointer type. The data word is the pointer
// to the data.
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
if deref {
p.p = *(*unsafe.Pointer)(p.p)
}
return p
}
// valToPointer converts v to a pointer. v must be of pointer type.

View File

@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties {
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties {
return sprop
}
type (
oneofFuncsIface interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
oneofWrappersIface interface {
XXX_OneofWrappers() []interface{}
}
)
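For context, a hedged sketch of the generated methods these interfaces match; Msg, Msg_Name, and Msg_Id are hypothetical names. Newer protoc-gen-go emits XXX_OneofWrappers, while older versions emitted XXX_OneofFuncs:

// Hypothetical generated code for a message with a two-case oneof.
func (*Msg) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*Msg_Name)(nil),
		(*Msg_Id)(nil),
	}
}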
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
// Re-order prop.order.
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
var oots []interface{}
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
case oneofFuncsIface:
_, _, _, oots = m.XXX_OneofFuncs()
case oneofWrappersIface:
oots = m.XXX_OneofWrappers()
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
_, _, _, oots = om.XXX_OneofFuncs()
if len(oots) > 0 {
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {

View File

@ -87,6 +87,7 @@ type marshalElemInfo struct {
sizer sizer
marshaler marshaler
isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
deref bool // dereference the pointer before operating on it; implies isptr
}
var (
@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
// get oneof implementers
var oneofImplementers []interface{}
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
case oneofFuncsIface:
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
case oneofWrappersIface:
oneofImplementers = m.XXX_OneofWrappers()
}
n := t.NumField()
@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
panic("tag is not an integer")
}
wt := wiretype(tags[0])
if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
t = t.Elem()
}
sizer, marshaler := typeMarshaler(t, tags, false, false)
var deref bool
if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
t = reflect.PtrTo(t)
deref = true
}
e = &marshalElemInfo{
wiretag: uint64(tag)<<3 | wt,
tagsize: SizeVarint(uint64(tag) << 3),
sizer: sizer,
marshaler: marshaler,
isptr: t.Kind() == reflect.Ptr,
deref: deref,
}
// update cache
@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
fi.field = toField(f)
fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
fi.isPointer = true
fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
}
}
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
// wiretype returns the wire encoding of the type.
func wiretype(encoding string) uint64 {
switch encoding {
@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
for _, k := range m.MapKeys() {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
kaddr := toAddrPointer(&ki, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
kaddr := toAddrPointer(&ki, false, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
for _, k := range keys {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
kaddr := toAddrPointer(&ki, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
kaddr := toAddrPointer(&ki, false, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
b = appendVarint(b, tag)
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
b = appendVarint(b, uint64(siz))
@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
// the last time this function was called.
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
mu.Unlock()
@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, 1) // message, tag = 3 (size=1)
}
mu.Unlock()
@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
if !nerr.Merge(err) {
return b, err
@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
b = append(b, 1<<3|WireEndGroup)
if !nerr.Merge(err) {
@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
return n
@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err

View File

@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
u.computeUnmarshalInfo()
}
if u.isMessageSet {
return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
}
var reqMask uint64 // bitmask of required fields we've seen.
var errLater error
@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
}
// Find any types associated with oneof fields.
// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
if fn.IsValid() {
res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
for i := res.Len() - 1; i >= 0; i-- {
v := res.Index(i) // interface{}
tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
typ := tptr.Elem() // Msg_X
var oneofImplementers []interface{}
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
case oneofFuncsIface:
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
case oneofWrappersIface:
oneofImplementers = m.XXX_OneofWrappers()
}
for _, v := range oneofImplementers {
tptr := reflect.TypeOf(v) // *Msg_X
typ := tptr.Elem() // Msg_X
f := typ.Field(0) // oneof implementers have one field
baseUnmarshal := fieldUnmarshaler(&f)
tags := strings.Split(f.Tag.Get("protobuf"), ",")
fieldNum, err := strconv.Atoi(tags[1])
if err != nil {
panic("protobuf tag field not an integer: " + tags[1])
}
var name string
for _, tag := range tags {
if strings.HasPrefix(tag, "name=") {
name = strings.TrimPrefix(tag, "name=")
break
}
}
// Find the oneof field that this struct implements.
// Might take O(n^2) to process all of the oneofs, but who cares.
for _, of := range oneofFields {
if tptr.Implements(of.ityp) {
// We have found the corresponding interface for this struct.
// That lets us know where this struct should be stored
// when we encounter it during unmarshaling.
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
u.setTag(fieldNum, of.field, unmarshal, 0, name)
}
f := typ.Field(0) // oneof implementers have one field
baseUnmarshal := fieldUnmarshaler(&f)
tags := strings.Split(f.Tag.Get("protobuf"), ",")
fieldNum, err := strconv.Atoi(tags[1])
if err != nil {
panic("protobuf tag field not an integer: " + tags[1])
}
var name string
for _, tag := range tags {
if strings.HasPrefix(tag, "name=") {
name = strings.TrimPrefix(tag, "name=")
break
}
}
// Find the oneof field that this struct implements.
// Might take O(n^2) to process all of the oneofs, but who cares.
for _, of := range oneofFields {
if tptr.Implements(of.ityp) {
// We have found the corresponding interface for this struct.
// That lets us know where this struct should be stored
// when we encounter it during unmarshaling.
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
u.setTag(fieldNum, of.field, unmarshal, 0, name)
}
}
}
// Get extension ranges, if any.
fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
if fn.IsValid() {
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
panic("a message with extensions, but no extensions field in " + t.Name())
@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
// If there is an error, it returns 0,0.
func decodeVarint(b []byte) (uint64, int) {
var x, y uint64
if len(b) <= 0 {
if len(b) == 0 {
goto bad
}
x = uint64(b[0])

File diff suppressed because it is too large

View File

@ -417,6 +417,17 @@ message FileOptions {
// determining the namespace.
optional string php_namespace = 41;
// Use this option to change the namespace of php generated metadata classes.
// Default is empty. When this option is empty, the proto file name will be used
// for determining the namespace.
optional string php_metadata_namespace = 44;
// Use this option to change the package of ruby generated classes. Default
// is empty. When this option is not set, the package name will be used for
// determining the ruby package.
optional string ruby_package = 45;
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
repeated UninterpretedOption uninterpreted_option = 999;

View File

@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
package any // import "github.com/golang/protobuf/ptypes/any"
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
@ -99,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
// In practice, teams usually precompile into the binary all types that they
// expect it to use in the context of Any. However, for URLs which use the
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
@ -118,6 +121,10 @@ type Any struct {
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
// type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
@ -133,17 +140,19 @@ func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) {
return fileDescriptor_any_744b9ca530f228db, []int{0}
return fileDescriptor_b53526c13ae22eb4, []int{0}
}
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Any.Unmarshal(m, b)
}
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
}
func (dst *Any) XXX_Merge(src proto.Message) {
xxx_messageInfo_Any.Merge(dst, src)
func (m *Any) XXX_Merge(src proto.Message) {
xxx_messageInfo_Any.Merge(m, src)
}
func (m *Any) XXX_Size() int {
return xxx_messageInfo_Any.Size(m)
@ -172,9 +181,9 @@ func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
var fileDescriptor_any_744b9ca530f228db = []byte{
var fileDescriptor_b53526c13ae22eb4 = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,

View File

@ -120,17 +120,18 @@ option objc_class_prefix = "GPB";
// }
//
message Any {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
// In practice, teams usually precompile into the binary all types that they
// expect it to use in the context of Any. However, for URLs which use the
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
@ -139,6 +140,10 @@ message Any {
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
// type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//

View File

@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos)
d += time.Duration(p.Nanos) * time.Nanosecond
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
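A hedged usage sketch of the conversion this hunk touches, via the exported ptypes.Duration helper (assumes the durpb and fmt imports used elsewhere in this file):

p := &durpb.Duration{Seconds: 1, Nanos: 500000000}
d, err := ptypes.Duration(p)
if err != nil {
	// out of range for time.Duration
}
fmt.Println(d) // 1.5s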

View File

@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
package duration // import "github.com/golang/protobuf/ptypes/duration"
package duration
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
@ -99,17 +101,19 @@ func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) {
return fileDescriptor_duration_e7d612259e3f0613, []int{0}
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
}
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Duration.Unmarshal(m, b)
}
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
}
func (dst *Duration) XXX_Merge(src proto.Message) {
xxx_messageInfo_Duration.Merge(dst, src)
func (m *Duration) XXX_Merge(src proto.Message) {
xxx_messageInfo_Duration.Merge(m, src)
}
func (m *Duration) XXX_Size() int {
return xxx_messageInfo_Duration.Size(m)
@ -138,11 +142,9 @@ func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() {
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
}
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
var fileDescriptor_duration_e7d612259e3f0613 = []byte{
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,

View File

@ -111,11 +111,9 @@ func TimestampNow() *tspb.Timestamp {
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
seconds := t.Unix()
nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &tspb.Timestamp{
Seconds: seconds,
Nanos: nanos,
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
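A hedged sketch of the new behavior, reusing the 2017-01-15T01:30:15.01Z example from the Timestamp documentation elsewhere in this diff:

t := time.Date(2017, time.January, 15, 1, 30, 15, 10000000, time.UTC)
ts, err := ptypes.TimestampProto(t)
if err != nil {
	// timestamp outside the representable range
}
fmt.Println(ts.Seconds, ts.Nanos) // 1484443815 10000000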

View File

@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
package timestamp
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
@ -81,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
@ -92,8 +96,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
// to obtain a formatter capable of generating timestamps in this format.
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
@ -115,17 +119,19 @@ func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
return fileDescriptor_292007bbfe81227e, []int{0}
}
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
}
func (dst *Timestamp) XXX_Merge(src proto.Message) {
xxx_messageInfo_Timestamp.Merge(dst, src)
func (m *Timestamp) XXX_Merge(src proto.Message) {
xxx_messageInfo_Timestamp.Merge(m, src)
}
func (m *Timestamp) XXX_Size() int {
return xxx_messageInfo_Timestamp.Size(m)
@ -154,11 +160,9 @@ func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
func init() {
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
}
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
var fileDescriptor_292007bbfe81227e = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,

View File

@ -103,7 +103,9 @@ option objc_class_prefix = "GPB";
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
@ -114,8 +116,8 @@ option objc_class_prefix = "GPB";
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
// to obtain a formatter capable of generating timestamps in this format.
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {

354
vendor/github.com/hashicorp/go-raftchunking/LICENSE generated vendored Normal file
View File

@ -0,0 +1,354 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients' rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients'
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party's negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party's ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

6
vendor/github.com/hashicorp/go-raftchunking/Makefile generated vendored Normal file
View File

@ -0,0 +1,6 @@
# Determine this makefile's path.
# Be sure to place this BEFORE `include` directives, if any.
THIS_FILE := $(lastword $(MAKEFILE_LIST))
proto:
protoc --go_out=paths=source_relative:. types/types.proto

160
vendor/github.com/hashicorp/go-raftchunking/api.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
package raftchunking
import (
"bytes"
"crypto/rand"
"encoding/binary"
"fmt"
"io"
"time"
proto "github.com/golang/protobuf/proto"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-raftchunking/types"
"github.com/hashicorp/raft"
)
// errorFuture is used to return a static error.
type errorFuture struct {
err error
}
func (e errorFuture) Error() error {
return e.err
}
func (e errorFuture) Response() interface{} {
return nil
}
func (e errorFuture) Index() uint64 {
return 0
}
// multiFuture is a future specialized for the chunking case. It contains some
// number of other futures in the order in which data was chunked and sent to
// apply.
type multiFuture []raft.ApplyFuture
// Error will return only when all Error functions in the contained futures
// return, in order.
func (m multiFuture) Error() error {
for _, v := range m {
if err := v.Error(); err != nil {
return err
}
}
return nil
}
// Index returns the index of the last chunk. Since the required behavior is to
// not call this until Error has been called, the last Index will correspond to the Apply
// of the final chunk.
func (m multiFuture) Index() uint64 {
// This shouldn't happen but need an escape hatch
if len(m) == 0 {
return 0
}
return m[len(m)-1].Index()
}
// Response returns the response from underlying Apply of the last chunk.
func (m multiFuture) Response() interface{} {
// This shouldn't happen but need an escape hatch
if len(m) == 0 {
return nil
}
return m[len(m)-1].Response()
}
type ApplyFunc func(raft.Log, time.Duration) raft.ApplyFuture
// ChunkingApply takes in a byte slice and chunks it into
// raft.SuggestedMaxDataSize (or less if EOF) chunks, calling Apply on each. It
// requires a corresponding wrapper around the FSM to handle reconstructing on
// the other end. Timeout will be the timeout for each individual operation,
// not total. The return value is a future whose Error() will return only when
// all underlying Apply futures have had Error() return. Note that any error
// indicates that the entire operation will not be applied, assuming the
// correct FSM wrapper is used. If extensions is passed in, it will be set as
// the Extensions value on the Apply once all chunks are received.
func ChunkingApply(cmd, extensions []byte, timeout time.Duration, applyFunc ApplyFunc) raft.ApplyFuture {
// Generate a random op num via 64 random bits. These only have to be
// unique across _in flight_ chunk operations until a Term changes, so
// this should be fine.
rb := make([]byte, 8)
n, err := rand.Read(rb)
if err != nil {
return errorFuture{err: err}
}
if n != 8 {
return errorFuture{err: fmt.Errorf("expected to read %d bytes for op num, read %d", 8, n)}
}
opNum := binary.BigEndian.Uint64(rb)
var logs []raft.Log
var byteChunks [][]byte
var mf multiFuture
// We break into chunks first so that we know how many chunks there will be
// to put in NumChunks in the extensions info. This could probably be a bit
// more efficient by just reslicing, but doing it this way is easier for
// others to follow, and in this kind of operation chunking won't be the
// slow part anyway.
reader := bytes.NewReader(cmd)
remain := reader.Len()
for {
if remain <= 0 {
break
}
if remain > raft.SuggestedMaxDataSize {
remain = raft.SuggestedMaxDataSize
}
b := make([]byte, remain)
n, err := reader.Read(b)
if err != nil && err != io.EOF {
return errorFuture{err: err}
}
if n != remain {
return errorFuture{err: fmt.Errorf("expected to read %d bytes from buf, read %d", remain, n)}
}
byteChunks = append(byteChunks, b)
remain = reader.Len()
}
// Create the underlying chunked logs
for i, chunk := range byteChunks {
chunkInfo := &types.ChunkInfo{
OpNum: opNum,
SequenceNum: uint32(i),
NumChunks: uint32(len(byteChunks)),
}
// If extensions were passed in, attach them to the last chunk so it
// will go through Apply at the end.
if i == len(byteChunks)-1 {
chunkInfo.NextExtensions = extensions
}
chunkBytes, err := proto.Marshal(chunkInfo)
if err != nil {
return errorFuture{err: errwrap.Wrapf("error marshaling chunk info: {{err}}", err)}
}
logs = append(logs, raft.Log{
Data: chunk,
Extensions: chunkBytes,
})
}
for _, log := range logs {
mf = append(mf, applyFunc(log, timeout))
}
return mf
}
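A hedged usage sketch: r is a *raft.Raft, and hashicorp/raft v1.1+ exposes ApplyLog with exactly the ApplyFunc signature above:

func applyLarge(r *raft.Raft, payload []byte) (interface{}, error) {
	f := raftchunking.ChunkingApply(payload, nil, 5*time.Second, r.ApplyLog)
	if err := f.Error(); err != nil {
		return nil, err // the chunk set was not applied as a unit
	}
	resp := f.Response()
	// The FSM wrapper tags the final apply so callers can detect success.
	if cs, ok := resp.(raftchunking.ChunkingSuccess); ok {
		resp = cs.Response
	}
	return resp, nil
}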

103
vendor/github.com/hashicorp/go-raftchunking/chunking.go generated vendored Normal file
View File

@ -0,0 +1,103 @@
package raftchunking
import "github.com/mitchellh/copystructure"
type ChunkStorage interface {
// StoreChunk stores Data from ChunkInfo according to the other metadata
// (OpNum, SeqNum). The returned bool indicates whether all chunks have been
// received, that is, whether the number of non-nil chunks equals NumChunks.
StoreChunk(*ChunkInfo) (bool, error)
// FinalizeOp gets all chunks for an op number and then removes the chunk
// info for that op from the store. It should only be called when
// StoreChunk for a given op number returns true but should be safe to call
// at any time; clearing an op can be accomplished by calling this function
// and ignoring the non-error result.
FinalizeOp(uint64) ([]*ChunkInfo, error)
// GetChunks gets all currently tracked ops, for snapshotting
GetChunks() (ChunkMap, error)
// RestoreChunks restores the current FSM state from a map
RestoreChunks(ChunkMap) error
}
type State struct {
ChunkMap ChunkMap
}
// ChunkInfo holds chunk information
type ChunkInfo struct {
OpNum uint64
SequenceNum uint32
NumChunks uint32
Term uint64
Data []byte
}
// ChunkMap represents a set of data chunks. We use ChunkInfo with Data instead
// of bare []byte in case there is a need to extend this info later.
type ChunkMap map[uint64][]*ChunkInfo
// InmemChunkStorage satisfies ChunkStorage using an in-memory-only tracking
// method.
type InmemChunkStorage struct {
chunks ChunkMap
}
func NewInmemChunkStorage() *InmemChunkStorage {
return &InmemChunkStorage{
chunks: make(ChunkMap),
}
}
func (i *InmemChunkStorage) StoreChunk(chunk *ChunkInfo) (bool, error) {
chunks, ok := i.chunks[chunk.OpNum]
if !ok {
chunks = make([]*ChunkInfo, chunk.NumChunks)
i.chunks[chunk.OpNum] = chunks
}
chunks[chunk.SequenceNum] = chunk
for _, c := range chunks {
// Check for nil, but also check data length in case unmarshaling
// went wrong somewhere and produced a new, empty struct instead of
// keeping the pointer nil
if c == nil || len(c.Data) == 0 {
// Not done yet, so return
return false, nil
}
}
return true, nil
}
func (i *InmemChunkStorage) FinalizeOp(opNum uint64) ([]*ChunkInfo, error) {
ret := i.chunks[opNum]
delete(i.chunks, opNum)
return ret, nil
}
func (i *InmemChunkStorage) GetChunks() (ChunkMap, error) {
ret, err := copystructure.Copy(i.chunks)
if err != nil {
return nil, err
}
return ret.(ChunkMap), nil
}
func (i *InmemChunkStorage) RestoreChunks(chunks ChunkMap) error {
// If passed in explicit emptiness, set state to empty
if len(chunks) == 0 {
i.chunks = make(ChunkMap)
return nil
}
chunksCopy, err := copystructure.Copy(chunks)
if err != nil {
return err
}
i.chunks = chunksCopy.(ChunkMap)
return nil
}
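A hedged sketch of the StoreChunk/FinalizeOp protocol using the in-memory store (assumes fmt is imported):

store := raftchunking.NewInmemChunkStorage()

// First of two chunks: not done yet.
done, _ := store.StoreChunk(&raftchunking.ChunkInfo{
	OpNum: 1, SequenceNum: 0, NumChunks: 2, Data: []byte("he"),
})
fmt.Println(done) // false

// Second chunk completes the op.
done, _ = store.StoreChunk(&raftchunking.ChunkInfo{
	OpNum: 1, SequenceNum: 1, NumChunks: 2, Data: []byte("llo"),
})
fmt.Println(done) // true

// Collect the chunks in order and clear the op from the store.
chunks, _ := store.FinalizeOp(1)
fmt.Println(len(chunks)) // 2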

159
vendor/github.com/hashicorp/go-raftchunking/fsm.go generated vendored Normal file
View File

@ -0,0 +1,159 @@
package raftchunking
import (
"io"
"github.com/golang/protobuf/proto"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-raftchunking/types"
"github.com/hashicorp/raft"
)
var _ raft.FSM = (*ChunkingFSM)(nil)
var _ raft.ConfigurationStore = (*ChunkingConfigurationStore)(nil)
type ChunkingSuccess struct {
Response interface{}
}
// ChunkingFSM is an FSM that implements chunking; it's the sister of
// ChunkingApply.
//
// N.B.: If a term change happens the final apply from the client will have a
// nil result and not be passed through to the underlying FSM. To detect this,
// the final apply to the underlying FSM is wrapped in ChunkingSuccess.
type ChunkingFSM struct {
underlying raft.FSM
store ChunkStorage
lastTerm uint64
}
type ChunkingConfigurationStore struct {
*ChunkingFSM
underlyingConfigurationStore raft.ConfigurationStore
}
func NewChunkingFSM(underlying raft.FSM, store ChunkStorage) *ChunkingFSM {
ret := &ChunkingFSM{
underlying: underlying,
store: store,
}
if store == nil {
ret.store = NewInmemChunkStorage()
}
return ret
}
func NewChunkingConfigurationStore(underlying raft.ConfigurationStore, store ChunkStorage) *ChunkingConfigurationStore {
ret := &ChunkingConfigurationStore{
ChunkingFSM: &ChunkingFSM{
underlying: underlying,
store: store,
},
underlyingConfigurationStore: underlying,
}
if store == nil {
ret.ChunkingFSM.store = NewInmemChunkStorage()
}
return ret
}
// Apply applies the log, handling chunking as needed. The return value will
// either be an error or whatever is returned from the underlying Apply.
func (c *ChunkingFSM) Apply(l *raft.Log) interface{} {
// Not chunking or wrong type, pass through
if l.Type != raft.LogCommand || l.Extensions == nil {
return c.underlying.Apply(l)
}
if l.Term != c.lastTerm {
// Term has changed. A raft library client that was applying chunks
// should get an error that it's no longer the leader and bail, and
// then any client of (Consul, Vault, etc.) should then retry the full
// chunking operation automatically, which will be under a different
// opnum. So it should be safe in this case to clear the map.
if err := c.store.RestoreChunks(nil); err != nil {
return err
}
c.lastTerm = l.Term
}
// Get chunk info from extensions
var ci types.ChunkInfo
if err := proto.Unmarshal(l.Extensions, &ci); err != nil {
return errwrap.Wrapf("error unmarshaling chunk info: {{err}}", err)
}
// Store the current chunk and find out if all chunks have arrived
done, err := c.store.StoreChunk(&ChunkInfo{
OpNum: ci.OpNum,
SequenceNum: ci.SequenceNum,
NumChunks: ci.NumChunks,
Term: l.Term,
Data: l.Data,
})
if err != nil {
return err
}
if !done {
return nil
}
// All chunks are here; get the full set and clear storage of the op
chunks, err := c.store.FinalizeOp(ci.OpNum)
if err != nil {
return err
}
finalData := make([]byte, 0, len(chunks)*raft.SuggestedMaxDataSize)
for _, chunk := range chunks {
finalData = append(finalData, chunk.Data...)
}
// Use the latest log's values with the final data
logToApply := &raft.Log{
Index: l.Index,
Term: l.Term,
Type: l.Type,
Data: finalData,
Extensions: ci.NextExtensions,
}
return ChunkingSuccess{Response: c.Apply(logToApply)}
}
func (c *ChunkingFSM) Snapshot() (raft.FSMSnapshot, error) {
return c.underlying.Snapshot()
}
func (c *ChunkingFSM) Restore(rc io.ReadCloser) error {
return c.underlying.Restore(rc)
}
// Note: this is used in tests via the Raft package test helper functions, even
// if it's not used in client code
func (c *ChunkingFSM) Underlying() raft.FSM {
return c.underlying
}
func (c *ChunkingFSM) CurrentState() (*State, error) {
chunks, err := c.store.GetChunks()
if err != nil {
return nil, err
}
return &State{
ChunkMap: chunks,
}, nil
}
func (c *ChunkingFSM) RestoreState(state *State) error {
// If nil we'll restore to blank, so create a new state with a nil map
if state == nil {
state = new(State)
}
return c.store.RestoreChunks(state.ChunkMap)
}
func (c *ChunkingConfigurationStore) StoreConfiguration(index uint64, configuration raft.Configuration) {
c.underlyingConfigurationStore.StoreConfiguration(index, configuration)
}
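
As a sketch of how a client might wrap its FSM, assuming a hypothetical kvFSM type: logs without chunking extensions pass straight through, while a completed chunked op comes back wrapped in ChunkingSuccess.

package main

import (
	"fmt"
	"io"

	raftchunking "github.com/hashicorp/go-raftchunking"
	"github.com/hashicorp/raft"
)

// kvFSM is a stand-in application FSM (hypothetical).
type kvFSM struct{ applied int }

func (f *kvFSM) Apply(l *raft.Log) interface{}       { f.applied++; return f.applied }
func (f *kvFSM) Snapshot() (raft.FSMSnapshot, error) { return nil, nil }
func (f *kvFSM) Restore(rc io.ReadCloser) error      { return nil }

func main() {
	// nil storage falls back to the in-memory chunk store.
	fsm := raftchunking.NewChunkingFSM(&kvFSM{}, nil)
	// In real code, pass fsm to raft.NewRaft.

	// A plain command with no Extensions is passed through unchanged.
	resp := fsm.Apply(&raft.Log{Type: raft.LogCommand, Data: []byte("x")})
	if s, ok := resp.(raftchunking.ChunkingSuccess); ok {
		fmt.Println("chunked op finished:", s.Response)
	} else {
		fmt.Println("pass-through result:", resp)
	}
}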

12
vendor/github.com/hashicorp/go-raftchunking/go.mod generated vendored Normal file
View File

@ -0,0 +1,12 @@
module github.com/hashicorp/go-raftchunking
go 1.12
require (
github.com/go-test/deep v1.0.2
github.com/golang/protobuf v1.3.1
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/raft v1.1.1
github.com/kr/pretty v0.1.0
github.com/mitchellh/copystructure v1.0.0
)

61
vendor/github.com/hashicorp/go-raftchunking/go.sum generated vendored Normal file
View File

@ -0,0 +1,61 @@
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM=
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs=
github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@ -0,0 +1,115 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: types/types.proto
package types
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type ChunkInfo struct {
// OpNum is the ID of the op, used to ensure values are applied to the
// right operation
OpNum uint64 `protobuf:"varint,1,opt,name=op_num,json=opNum,proto3" json:"op_num,omitempty"`
// SequenceNum is the current number of the ops; when applying we should
// see this start at zero and increment by one without skips
SequenceNum uint32 `protobuf:"varint,2,opt,name=sequence_num,json=sequenceNum,proto3" json:"sequence_num,omitempty"`
// NumChunks is used to check whether all chunks have been received and
// reconstruction should be attempted
NumChunks uint32 `protobuf:"varint,3,opt,name=num_chunks,json=numChunks,proto3" json:"num_chunks,omitempty"`
// NextExtensions holds inner extensions information for the next layer
// down of Apply
NextExtensions []byte `protobuf:"bytes,4,opt,name=next_extensions,json=nextExtensions,proto3" json:"next_extensions,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ChunkInfo) Reset() { *m = ChunkInfo{} }
func (m *ChunkInfo) String() string { return proto.CompactTextString(m) }
func (*ChunkInfo) ProtoMessage() {}
func (*ChunkInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_2c0f90c600ad7e2e, []int{0}
}
func (m *ChunkInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ChunkInfo.Unmarshal(m, b)
}
func (m *ChunkInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ChunkInfo.Marshal(b, m, deterministic)
}
func (m *ChunkInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_ChunkInfo.Merge(m, src)
}
func (m *ChunkInfo) XXX_Size() int {
return xxx_messageInfo_ChunkInfo.Size(m)
}
func (m *ChunkInfo) XXX_DiscardUnknown() {
xxx_messageInfo_ChunkInfo.DiscardUnknown(m)
}
var xxx_messageInfo_ChunkInfo proto.InternalMessageInfo
func (m *ChunkInfo) GetOpNum() uint64 {
if m != nil {
return m.OpNum
}
return 0
}
func (m *ChunkInfo) GetSequenceNum() uint32 {
if m != nil {
return m.SequenceNum
}
return 0
}
func (m *ChunkInfo) GetNumChunks() uint32 {
if m != nil {
return m.NumChunks
}
return 0
}
func (m *ChunkInfo) GetNextExtensions() []byte {
if m != nil {
return m.NextExtensions
}
return nil
}
func init() {
proto.RegisterType((*ChunkInfo)(nil), "github_com_hashicorp_go_raftchunking_types.ChunkInfo")
}
func init() { proto.RegisterFile("types/types.proto", fileDescriptor_2c0f90c600ad7e2e) }
var fileDescriptor_2c0f90c600ad7e2e = []byte{
// 203 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8f, 0x31, 0x4b, 0xc6, 0x30,
0x10, 0x86, 0x89, 0xb6, 0x95, 0xc6, 0xaa, 0x18, 0x10, 0xba, 0x08, 0xd5, 0xc5, 0xe2, 0xa0, 0x83,
0xff, 0x40, 0x71, 0x70, 0x71, 0xe8, 0xe8, 0x72, 0xb4, 0x21, 0x6d, 0x82, 0xe4, 0x2e, 0x36, 0x09,
0xd4, 0x1f, 0xe1, 0x7f, 0x96, 0x1e, 0x7c, 0xdf, 0x72, 0xc3, 0x73, 0xef, 0x3d, 0xdc, 0x2b, 0xaf,
0xd3, 0x6f, 0x30, 0xf1, 0x99, 0xe7, 0x53, 0x58, 0x29, 0x91, 0x7a, 0x5c, 0x5c, 0xb2, 0x79, 0x02,
0x4d, 0x1e, 0xec, 0x18, 0xad, 0xd3, 0xb4, 0x06, 0x58, 0x08, 0xd6, 0x71, 0x4e, 0xda, 0x66, 0xfc,
0x76, 0xb8, 0x00, 0x5f, 0xdc, 0xff, 0x09, 0x59, 0xbf, 0xed, 0xe8, 0x03, 0x67, 0x52, 0x37, 0xb2,
0xa2, 0x00, 0x98, 0x7d, 0x2b, 0x3a, 0xd1, 0x17, 0x43, 0x49, 0xe1, 0x33, 0x7b, 0x75, 0x27, 0x9b,
0x68, 0x7e, 0xb2, 0x41, 0x6d, 0x78, 0x79, 0xd2, 0x89, 0xfe, 0x62, 0x38, 0x3f, 0xb0, 0x3d, 0x72,
0x2b, 0x25, 0x66, 0x0f, 0x6c, 0x8f, 0xed, 0x29, 0x07, 0x6a, 0xcc, 0x9e, 0xdd, 0x51, 0x3d, 0xc8,
0x2b, 0x34, 0x5b, 0x02, 0xb3, 0x25, 0x83, 0xd1, 0x11, 0xc6, 0xb6, 0xe8, 0x44, 0xdf, 0x0c, 0x97,
0x3b, 0x7e, 0x3f, 0xd2, 0xd7, 0xb3, 0xaf, 0x92, 0x1f, 0x9b, 0x2a, 0xee, 0xf2, 0xf2, 0x1f, 0x00,
0x00, 0xff, 0xff, 0x4d, 0x98, 0xf7, 0x77, 0xe0, 0x00, 0x00, 0x00,
}

View File

@ -0,0 +1,23 @@
syntax = "proto3";
option go_package = "types";
package github_com_hashicorp_go_raftchunking_types;
message ChunkInfo {
// OpNum is the ID of the op, used to ensure values are applied to the
// right operation
uint64 op_num = 1;
// SequenceNum is the current number of the ops; when applying we should
// see this start at zero and increment by one without skips
uint32 sequence_num = 2;
// NumChunks is used to check whether all chunks have been received and
// reconstruction should be attempted
uint32 num_chunks = 3;
// NextExtensions holds inner extensions information for the next layer
// down of Apply
bytes next_extensions = 4;
}
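
A sketch of how a middleware layer might produce the Extensions blob for each chunk, mirroring the marshaling loop shown earlier in this diff; the function and argument names are illustrative:

package chunkdemo // hypothetical package

import (
	"github.com/golang/protobuf/proto"
	"github.com/hashicorp/go-raftchunking/types"
)

// chunkExtensions builds the per-chunk metadata carried in Log.Extensions.
// Only the final chunk carries NextExtensions for the next layer down.
func chunkExtensions(opNum uint64, seq, total uint32, inner []byte) ([]byte, error) {
	ci := &types.ChunkInfo{
		OpNum:       opNum,
		SequenceNum: seq,
		NumChunks:   total,
	}
	if seq == total-1 {
		ci.NextExtensions = inner
	}
	return proto.Marshal(ci)
}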

10
vendor/github.com/hashicorp/raft-boltdb/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,10 @@
language: go
go:
- 1.6
- 1.7
- tip
install: make deps
script:
- make test

11
vendor/github.com/hashicorp/raft-boltdb/Makefile generated vendored Normal file
View File

@ -0,0 +1,11 @@
DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...)
.PHONY: test deps
test:
go test -timeout=30s ./...
deps:
go get -d -v ./...
echo $(DEPS) | xargs -n1 go get -d

View File

@ -33,26 +33,56 @@ type BoltStore struct {
path string
}
// Options contains all the configuration used to open the BoltDB
type Options struct {
// Path is the file path to the BoltDB to use
Path string
// BoltOptions contains any specific BoltDB options you might
// want to specify [e.g. open timeout]
BoltOptions *bolt.Options
// NoSync causes the database to skip fsync calls after each
// write to the log. This is unsafe, so it should be used
// with caution.
NoSync bool
}
// readOnly returns true if the contained bolt options say to open
// the DB in readOnly mode [this can be useful to tools that want
// to examine the log]
func (o *Options) readOnly() bool {
return o != nil && o.BoltOptions != nil && o.BoltOptions.ReadOnly
}
// NewBoltStore takes a file path and returns a connected Raft backend.
func NewBoltStore(path string) (*BoltStore, error) {
return New(Options{Path: path})
}
// New uses the supplied options to open the BoltDB and prepare it for use as a raft backend.
func New(options Options) (*BoltStore, error) {
// Try to connect
handle, err := bolt.Open(path, dbFileMode, nil)
handle, err := bolt.Open(options.Path, dbFileMode, options.BoltOptions)
if err != nil {
return nil, err
}
handle.NoSync = options.NoSync
// Create the new store
store := &BoltStore{
conn: handle,
path: path,
path: options.Path,
}
// Set up our buckets
if err := store.initialize(); err != nil {
store.Close()
return nil, err
// If the store was opened read-only, don't try and create buckets
if !options.readOnly() {
// Set up our buckets
if err := store.initialize(); err != nil {
store.Close()
return nil, err
}
}
return store, nil
}
@ -213,7 +243,7 @@ func (b *BoltStore) Get(k []byte) ([]byte, error) {
if val == nil {
return nil, ErrKeyNotFound
}
return append([]byte{}, val...), nil
return append([]byte(nil), val...), nil
}
// SetUint64 is like Set, but handles uint64 values
@ -229,3 +259,10 @@ func (b *BoltStore) GetUint64(key []byte) (uint64, error) {
}
return bytesToUint64(val), nil
}
// Sync performs an fsync on the database file handle. This is not necessary
// under normal operation unless NoSync is enabled, in which case this forces the
// database file to sync against the disk.
func (b *BoltStore) Sync() error {
return b.conn.Sync()
}
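
A sketch of opening the store through the new Options path; the file path is hypothetical, and with NoSync set the caller takes on calling Sync at safe points:

package main

import (
	"fmt"
	"time"

	"github.com/boltdb/bolt"
	raftboltdb "github.com/hashicorp/raft-boltdb"
)

func main() {
	// Open with NoSync for throughput; fsync explicitly via Sync below.
	store, err := raftboltdb.New(raftboltdb.Options{
		Path:   "/tmp/raft.db", // placeholder path
		NoSync: true,
		// BoltOptions passes through anything bolt.Open accepts,
		// e.g. an open timeout, or ReadOnly for inspection tools.
		BoltOptions: &bolt.Options{Timeout: time.Second},
	})
	if err != nil {
		panic(err)
	}
	defer store.Close()

	if err := store.Sync(); err != nil { // explicit fsync since NoSync is set
		panic(err)
	}
	fmt.Println("store ready")
}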

View File

@ -1,6 +1,23 @@
# UNRELEASED
# 1.1.1 (July 23rd, 2019)
FEATURES
* Add support for extensions to be sent on log entries [[GH-353](https://github.com/hashicorp/raft/pull/353)]
* Add config option to skip snapshot restore on startup [[GH-340](https://github.com/hashicorp/raft/pull/340)]
* Add optional configuration store interface [[GH-339](https://github.com/hashicorp/raft/pull/339)]
IMPROVEMENTS
* Break out of group commit early when no logs are present [[GH-341](https://github.com/hashicorp/raft/pull/341)]
BUGFIXES
* Fix 64-bit counters on 32-bit platforms [[GH-344](https://github.com/hashicorp/raft/pull/344)]
* Don't defer closing source in recover/restore operations since it's in a loop [[GH-337](https://github.com/hashicorp/raft/pull/337)]
# 1.1.0 (May 23rd, 2019)
FEATURES

View File

@ -1,4 +1,5 @@
DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...)
TEST_RESULTS_DIR?=/tmp/test-results
test:
go test -timeout=60s -race .
@ -6,9 +7,18 @@ test:
integ: test
INTEG_TESTS=yes go test -timeout=25s -run=Integ .
ci.test-norace:
gotestsum --format=short-verbose --junitfile $(TEST_RESULTS_DIR)/gotestsum-report-test.xml -- -timeout=60s
ci.test:
gotestsum --format=short-verbose --junitfile $(TEST_RESULTS_DIR)/gotestsum-report-test.xml -- -timeout=60s -race .
ci.integ: ci.test
INTEG_TESTS=yes gotestsum --format=short-verbose --junitfile $(TEST_RESULTS_DIR)/gotestsum-report-integ.xml -- -timeout=25s -run=Integ .
fuzz:
go test -timeout=300s ./fuzzy
deps:
go get -t -d -v ./...
echo $(DEPS) | xargs -n1 go get -d

View File

@ -1,12 +1,12 @@
raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft)
raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft) [![CircleCI](https://circleci.com/gh/hashicorp/raft.svg?style=svg)](https://circleci.com/gh/hashicorp/raft)
====
raft is a [Go](http://www.golang.org) library that manages a replicated
log and can be used with an FSM to manage replicated state machines. It
is a library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)).
The use cases for such a library are far-reaching as replicated state
machines are a key component of many distributed systems. They enable
The use cases for such a library are far-reaching, such as replicated state
machines which are a key component of many distributed systems. They enable
building Consistent, Partition Tolerant (CP) systems, with limited
fault tolerance as well.
@ -52,10 +52,10 @@ to port Consul to these new interfaces.
## Protocol
raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf)
raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://raft.github.io/raft.pdf)
A high level overview of the Raft protocol is described below, but for details please read the full
[Raft paper](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf)
[Raft paper](https://raft.github.io/raft.pdf)
followed by the raft source. Any questions about the raft protocol should be sent to the
[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev).

View File

@ -14,6 +14,19 @@ import (
"github.com/armon/go-metrics"
)
const (
// This is the current suggested max size of the data in a raft log entry.
// This is based on current architecture, default timing, etc. Clients can
// ignore this value if they want as there is no actual hard checking
// within the library. As the library is enhanced this value may change
// over time to reflect current suggested maximums.
//
// Increasing beyond this risks RPC IO taking too long and preventing
// timely heartbeat signals which are sent in serial in current transports,
// potentially causing leadership instability.
SuggestedMaxDataSize = 512 * 1024
)
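
For illustration, one way a client might respect the suggested cap when pre-splitting a payload; the helper name is hypothetical, and as the comment notes, the library itself enforces nothing:

package chunkdemo // hypothetical package

import "github.com/hashicorp/raft"

// splitPayload keeps each log entry's Data at or under
// SuggestedMaxDataSize so serial heartbeats aren't starved by
// oversized RPCs.
func splitPayload(data []byte) [][]byte {
	var chunks [][]byte
	for len(data) > 0 {
		n := raft.SuggestedMaxDataSize
		if len(data) < n {
			n = len(data)
		}
		chunks = append(chunks, data[:n:n])
		data = data[n:]
	}
	return chunks
}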
var (
// ErrLeader is returned when an operation can't be completed on a
// leader node.
@ -175,9 +188,12 @@ type Raft struct {
// BootstrapCluster initializes a server's storage with the given cluster
// configuration. This should only be called at the beginning of time for the
// cluster, and you absolutely must make sure that you call it with the same
// configuration on all the Voter servers. There is no need to bootstrap
// Nonvoter and Staging servers.
// cluster with an identical configuration listing all Voter servers. There is
// no need to bootstrap Nonvoter and Staging servers.
//
// A cluster can only be bootstrapped once from a single participating Voter
// server. Any further attempts to bootstrap will return an error that can be
// safely ignored.
//
// One sane approach is to bootstrap a single server with a configuration
// listing just itself as a Voter, then invoke AddVoter() on it to add other
@ -288,17 +304,21 @@ func RecoverCluster(conf *Config, fsm FSM, logs LogStore, stable StableStore,
return fmt.Errorf("failed to list snapshots: %v", err)
}
for _, snapshot := range snapshots {
_, source, err := snaps.Open(snapshot.ID)
if err != nil {
// Skip this one and try the next. We will detect if we
// couldn't open any snapshots.
continue
}
defer source.Close()
if !conf.NoSnapshotRestoreOnStart {
_, source, err := snaps.Open(snapshot.ID)
if err != nil {
// Skip this one and try the next. We will detect if we
// couldn't open any snapshots.
continue
}
if err := fsm.Restore(source); err != nil {
// Same here, skip and try the next one.
continue
err = fsm.Restore(source)
// Close the source after the restore has completed
source.Close()
if err != nil {
// Same here, skip and try the next one.
continue
}
}
snapshotIndex = snapshot.Index
@ -540,21 +560,23 @@ func (r *Raft) restoreSnapshot() error {
// Try to load in order of newest to oldest
for _, snapshot := range snapshots {
_, source, err := r.snapshots.Open(snapshot.ID)
if err != nil {
r.logger.Error(fmt.Sprintf("Failed to open snapshot %v: %v", snapshot.ID, err))
continue
if !r.conf.NoSnapshotRestoreOnStart {
_, source, err := r.snapshots.Open(snapshot.ID)
if err != nil {
r.logger.Error(fmt.Sprintf("Failed to open snapshot %v: %v", snapshot.ID, err))
continue
}
err = r.fsm.Restore(source)
// Close the source after the restore has completed
source.Close()
if err != nil {
r.logger.Error(fmt.Sprintf("Failed to restore snapshot %v: %v", snapshot.ID, err))
continue
}
r.logger.Info(fmt.Sprintf("Restored from snapshot %v", snapshot.ID))
}
defer source.Close()
if err := r.fsm.Restore(source); err != nil {
r.logger.Error(fmt.Sprintf("Failed to restore snapshot %v: %v", snapshot.ID, err))
continue
}
// Log success
r.logger.Info(fmt.Sprintf("Restored from snapshot %v", snapshot.ID))
// Update the lastApplied so we don't replay old logs
r.setLastApplied(snapshot.Index)
@ -588,10 +610,17 @@ func (r *Raft) restoreSnapshot() error {
// BootstrapCluster is equivalent to non-member BootstrapCluster but can be
// called on an un-bootstrapped Raft instance after it has been created. This
// should only be called at the beginning of time for the cluster, and you
// absolutely must make sure that you call it with the same configuration on all
// the Voter servers. There is no need to bootstrap Nonvoter and Staging
// servers.
// should only be called at the beginning of time for the cluster with an
// identical configuration listing all Voter servers. There is no need to
// bootstrap Nonvoter and Staging servers.
//
// A cluster can only be bootstrapped once from a single participating Voter
// server. Any further attempts to bootstrap will return an error that can be
// safely ignored.
//
// One sane approach is to bootstrap a single server with a configuration
// listing just itself as a Voter, then invoke AddVoter() on it to add other
// servers to the cluster.
func (r *Raft) BootstrapCluster(configuration Configuration) Future {
bootstrapReq := &bootstrapFuture{}
bootstrapReq.init()
@ -620,7 +649,14 @@ func (r *Raft) Leader() ServerAddress {
// for the command to be started. This must be run on the leader or it
// will fail.
func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture {
return r.ApplyLog(Log{Data: cmd}, timeout)
}
// ApplyLog performs Apply but takes in a Log directly. The only values
// currently taken from the submitted Log are Data and Extensions.
func (r *Raft) ApplyLog(log Log, timeout time.Duration) ApplyFuture {
metrics.IncrCounter([]string{"raft", "apply"}, 1)
var timer <-chan time.Time
if timeout > 0 {
timer = time.After(timeout)
@ -629,8 +665,9 @@ func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture {
// Create a log future, no index or term yet
logFuture := &logFuture{
log: Log{
Type: LogCommand,
Data: cmd,
Type: LogCommand,
Data: log.Data,
Extensions: log.Extensions,
},
}
logFuture.init()
@ -934,7 +971,7 @@ func (r *Raft) LastContact() time.Time {
// "last_snapshot_index", "last_snapshot_term",
// "latest_configuration", "last_contact", and "num_peers".
//
// The value of "state" is a numeric constant representing one of
// The value of "state" is a numeric constant representing one of
// the possible leadership states the node is in at any given time.
// the possible states are: "Follower", "Candidate", "Leader", "Shutdown".
//

View File

@ -199,6 +199,13 @@ type Config struct {
// Logger is a user-provided hc-log logger. If nil, a logger writing to
// LogOutput with LogLevel is used.
Logger hclog.Logger
// NoSnapshotRestoreOnStart controls if raft will restore a snapshot to the
// FSM on start. This is useful if your FSM recovers from mechanisms other
// than raft snapshotting. Snapshot metadata will still be used to initialize
// raft's configuration and index values. This is used in NewRaft and
// RestoreCluster.
NoSnapshotRestoreOnStart bool
}
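
A sketch of wiring the new option in, assuming an FSM that re-hydrates its own durable state; the identifiers here are placeholders:

package raftsetup // hypothetical package

import "github.com/hashicorp/raft"

// newNode skips raft's snapshot restore at startup because the FSM is
// assumed to recover durable state on its own. Snapshot metadata is still
// used to seed raft's index and configuration values.
func newNode(fsm raft.FSM, logs raft.LogStore, stable raft.StableStore,
	snaps raft.SnapshotStore, trans raft.Transport) (*raft.Raft, error) {
	conf := raft.DefaultConfig()
	conf.LocalID = raft.ServerID("node-1") // placeholder ID
	conf.NoSnapshotRestoreOnStart = true
	return raft.NewRaft(conf, fsm, logs, stable, snaps, trans)
}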
// DefaultConfig returns a Config with usable defaults.

View File

@ -32,6 +32,26 @@ func (s ServerSuffrage) String() string {
return "ServerSuffrage"
}
// ConfigurationStore provides an interface that can optionally be implemented by FSMs
// to store configuration updates made in the replicated log. In general this is only
// necessary for FSMs that mutate durable state directly instead of applying changes
// in memory and snapshotting periodically. By storing configuration changes, the
// persistent FSM state can behave as a complete snapshot, and be able to recover
// without an external snapshot just for persisting the raft configuration.
type ConfigurationStore interface {
// ConfigurationStore is a superset of the FSM functionality
FSM
// StoreConfiguration is invoked once a log entry containing a configuration
// change is committed. It takes the index at which the configuration was
// written and the configuration value.
StoreConfiguration(index uint64, configuration Configuration)
}
type nopConfigurationStore struct{}
func (s nopConfigurationStore) StoreConfiguration(_ uint64, _ Configuration) {}
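
For illustration, a minimal FSM that opts in to the interface; diskFSM and its persistence are hypothetical. Note from the diff further below that LogConfiguration entries are only forwarded to the FSM when the protocol version is greater than 2.

package durablefsm // hypothetical package

import (
	"io"

	"github.com/hashicorp/raft"
)

// diskFSM sketches an FSM that mutates durable state directly, so it also
// records raft configuration changes to stay recoverable without an
// external snapshot.
type diskFSM struct {
	lastConfig raft.Configuration // persist this durably in real code
}

func (f *diskFSM) Apply(l *raft.Log) interface{}       { return nil } // write to disk here
func (f *diskFSM) Snapshot() (raft.FSMSnapshot, error) { return nil, nil }
func (f *diskFSM) Restore(rc io.ReadCloser) error      { return nil }

// StoreConfiguration makes diskFSM a raft.ConfigurationStore.
func (f *diskFSM) StoreConfiguration(index uint64, config raft.Configuration) {
	f.lastConfig = config // record alongside the FSM state
}

var _ raft.ConfigurationStore = (*diskFSM)(nil)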
// ServerID is a unique string identifying a server for all time.
type ServerID string

View File

@ -50,23 +50,39 @@ func (r *Raft) runFSM() {
var lastIndex, lastTerm uint64
commit := func(req *commitTuple) {
// Apply the log if a command
// Apply the log if a command or config change
var resp interface{}
if req.log.Type == LogCommand {
// Make sure we send a response
defer func() {
// Invoke the future if given
if req.future != nil {
req.future.response = resp
req.future.respond(nil)
}
}()
switch req.log.Type {
case LogCommand:
start := time.Now()
resp = r.fsm.Apply(req.log)
metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start)
case LogConfiguration:
configStore, ok := r.fsm.(ConfigurationStore)
if !ok {
// Return early to avoid incrementing the index and term for
// an unimplemented operation.
return
}
start := time.Now()
configStore.StoreConfiguration(req.log.Index, decodeConfiguration(req.log.Data))
metrics.MeasureSince([]string{"raft", "fsm", "store_config"}, start)
}
// Update the indexes
lastIndex = req.log.Index
lastTerm = req.log.Term
// Invoke the future if given
if req.future != nil {
req.future.response = resp
req.future.respond(nil)
}
}
restore := func(req *restoreFuture) {

View File

@ -4,7 +4,10 @@ go 1.12
require (
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878
github.com/boltdb/bolt v1.3.1 // indirect
github.com/hashicorp/go-hclog v0.9.1
github.com/hashicorp/go-msgpack v0.5.5
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea
github.com/stretchr/testify v1.3.0
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 // indirect
)

View File

@ -2,6 +2,8 @@ github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -16,10 +18,14 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -35,3 +41,5 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@ -47,6 +47,21 @@ type Log struct {
// Data holds the log entry's type-specific data.
Data []byte
// Extensions holds an opaque byte slice of information for middleware. It
// is up to the client of the library to properly modify this as it adds
// layers and remove those layers when appropriate. This value is a part of
// the log, so very large values could cause timing issues.
//
// N.B. It is _up to the client_ to handle upgrade paths. For instance if
// using this with go-raftchunking, the client should ensure that all Raft
// peers are using a version that can handle that extension before ever
// actually triggering chunking behavior. It is sometimes sufficient to
// ensure that non-leaders are upgraded first, then the current leader is
// upgraded, but a leader changeover during this process could lead to
// trouble, so gating extension behavior via some flag in the client
// program is also a good idea.
Extensions []byte
}
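
A sketch of the gating advice in the comment above: attach Extensions only once a (hypothetical) operator flag confirms every peer can decode them.

package raftclient // hypothetical package

import (
	"time"

	"github.com/hashicorp/raft"
)

// extensionsEnabled would be flipped by operator config once all peers run
// a version that understands the extension format (hypothetical flag).
var extensionsEnabled bool

func apply(r *raft.Raft, data, ext []byte) raft.ApplyFuture {
	log := raft.Log{Data: data}
	if extensionsEnabled {
		log.Extensions = ext // safe only after the whole cluster upgrades
	}
	// ApplyLog honors only Data and Extensions from the submitted Log.
	return r.ApplyLog(log, 10*time.Second)
}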
// LogStore is used to provide an interface for storing

View File

@ -549,6 +549,7 @@ func (r *Raft) leaderLoop() {
future.respond(ErrLeadershipTransferInProgress)
continue
}
r.logger.Debug("starting leadership transfer", "id", future.ID, "address", future.Address)
// When we are leaving leaderLoop, we are no longer
@ -725,12 +726,13 @@ func (r *Raft) leaderLoop() {
}
// Group commit, gather all the ready commits
ready := []*logFuture{newLog}
GROUP_COMMIT_LOOP:
for i := 0; i < r.conf.MaxAppendEntries; i++ {
select {
case newLog := <-r.applyCh:
ready = append(ready, newLog)
default:
break
break GROUP_COMMIT_LOOP
}
}
@ -844,27 +846,34 @@ func (r *Raft) leadershipTransfer(id ServerID, address ServerAddress, repl *foll
// contact. This must only be called from the main thread.
func (r *Raft) checkLeaderLease() time.Duration {
// Track contacted nodes, we can always contact ourself
contacted := 1
contacted := 0
// Check each follower
var maxDiff time.Duration
now := time.Now()
for peer, f := range r.leaderState.replState {
diff := now.Sub(f.LastContact())
if diff <= r.conf.LeaderLeaseTimeout {
contacted++
if diff > maxDiff {
maxDiff = diff
for _, server := range r.configurations.latest.Servers {
if server.Suffrage == Voter {
if server.ID == r.localID {
contacted++
continue
}
} else {
// Log at least once at high value, then debug. Otherwise it gets very verbose.
if diff <= 3*r.conf.LeaderLeaseTimeout {
r.logger.Warn(fmt.Sprintf("Failed to contact %v in %v", peer, diff))
f := r.leaderState.replState[server.ID]
diff := now.Sub(f.LastContact())
if diff <= r.conf.LeaderLeaseTimeout {
contacted++
if diff > maxDiff {
maxDiff = diff
}
} else {
r.logger.Debug(fmt.Sprintf("Failed to contact %v in %v", peer, diff))
// Log at least once at high value, then debug. Otherwise it gets very verbose.
if diff <= 3*r.conf.LeaderLeaseTimeout {
r.logger.Warn(fmt.Sprintf("Failed to contact %v in %v", server.ID, diff))
} else {
r.logger.Debug(fmt.Sprintf("Failed to contact %v in %v", server.ID, diff))
}
}
metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond))
}
metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond))
}
// Verify we can contact a quorum
@ -1124,6 +1133,21 @@ func (r *Raft) processLog(l *Log, future *logFuture) {
return
case LogConfiguration:
// Only support this with the v2 configuration format
if r.protocolVersion > 2 {
// Forward to the fsm handler
select {
case r.fsmMutateCh <- &commitTuple{l, future}:
case <-r.shutdownCh:
if future != nil {
future.respond(ErrRaftShutdown)
}
}
// Return so that the future is only responded to
// by the FSM handler when the application is done
return
}
case LogAddPeerDeprecated:
case LogRemovePeerDeprecated:
case LogNoop:

View File

@ -28,6 +28,18 @@ var (
// followerReplication is in charge of sending snapshots and log entries from
// this leader during this particular term to a remote follower.
type followerReplication struct {
// currentTerm and nextIndex must be kept at the top of the struct so
// they're 64 bit aligned which is a requirement for atomic ops on 32 bit
// platforms.
// currentTerm is the term of this leader, to be included in AppendEntries
// requests.
currentTerm uint64
// nextIndex is the index of the next log entry to send to the follower,
// which may fall past the end of the log.
nextIndex uint64
// peer contains the network address and ID of the remote follower.
peer Server
@ -49,14 +61,6 @@ type followerReplication struct {
// deferErr, the sender can be notified when the replication is done.
triggerDeferErrorCh chan *deferError
// currentTerm is the term of this leader, to be included in AppendEntries
// requests.
currentTerm uint64
// nextIndex is the index of the next log entry to send to the follower,
// which may fall past the end of the log.
nextIndex uint64
// lastContact is updated to the current time whenever any response is
// received from the follower (successful or not). This is used to check
// whether the leader should step down (Raft.checkLeaderLease()).
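
The field-ordering comment in this diff reflects a general Go rule worth a sketch: sync/atomic 64-bit operations need 64-bit-aligned addresses on 32-bit platforms, and only the first word of an allocated struct is guaranteed to be aligned. Type names here are illustrative:

package repl // hypothetical package

import "sync/atomic"

// counters keeps its atomically accessed uint64 fields first so their
// addresses are 64-bit aligned even on 32-bit platforms.
type counters struct {
	currentTerm uint64 // atomically accessed; must stay at the top
	nextIndex   uint64 // atomically accessed

	name string // non-atomic fields follow
}

func bump(c *counters) uint64 {
	return atomic.AddUint64(&c.nextIndex, 1)
}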

799
vendor/github.com/hashicorp/raft/testing.go generated vendored Normal file
View File

@ -0,0 +1,799 @@
package raft
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"reflect"
"sync"
"testing"
"time"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-msgpack/codec"
)
// Return configurations optimized for in-memory
func inmemConfig(t *testing.T) *Config {
conf := DefaultConfig()
conf.HeartbeatTimeout = 50 * time.Millisecond
conf.ElectionTimeout = 50 * time.Millisecond
conf.LeaderLeaseTimeout = 50 * time.Millisecond
conf.CommitTimeout = 5 * time.Millisecond
conf.Logger = newTestLeveledLogger(t)
return conf
}
// MockFSM is an implementation of the FSM interface, and just stores
// the logs sequentially.
//
// NOTE: This is exposed for middleware testing purposes and is not a stable API
type MockFSM struct {
sync.Mutex
logs [][]byte
configurations []Configuration
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
type MockFSMConfigStore struct {
FSM
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
type WrappingFSM interface {
Underlying() FSM
}
func getMockFSM(fsm FSM) *MockFSM {
switch f := fsm.(type) {
case *MockFSM:
return f
case *MockFSMConfigStore:
return f.FSM.(*MockFSM)
case WrappingFSM:
return getMockFSM(f.Underlying())
}
return nil
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
type MockSnapshot struct {
logs [][]byte
maxIndex int
}
var _ ConfigurationStore = (*MockFSMConfigStore)(nil)
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSM) Apply(log *Log) interface{} {
m.Lock()
defer m.Unlock()
m.logs = append(m.logs, log.Data)
return len(m.logs)
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSM) Snapshot() (FSMSnapshot, error) {
m.Lock()
defer m.Unlock()
return &MockSnapshot{m.logs, len(m.logs)}, nil
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSM) Restore(inp io.ReadCloser) error {
m.Lock()
defer m.Unlock()
defer inp.Close()
hd := codec.MsgpackHandle{}
dec := codec.NewDecoder(inp, &hd)
m.logs = nil
return dec.Decode(&m.logs)
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSM) Logs() [][]byte {
m.Lock()
defer m.Unlock()
return m.logs
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockFSMConfigStore) StoreConfiguration(index uint64, config Configuration) {
mm := m.FSM.(*MockFSM)
mm.Lock()
defer mm.Unlock()
mm.configurations = append(mm.configurations, config)
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockSnapshot) Persist(sink SnapshotSink) error {
hd := codec.MsgpackHandle{}
enc := codec.NewEncoder(sink, &hd)
if err := enc.Encode(m.logs[:m.maxIndex]); err != nil {
sink.Cancel()
return err
}
sink.Close()
return nil
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func (m *MockSnapshot) Release() {
}
// This can be used as the destination for a logger; it maps writes
// into calls to testing.T.Log, so that you only see
// the logging for failed tests.
type testLoggerAdapter struct {
t *testing.T
prefix string
}
func (a *testLoggerAdapter) Write(d []byte) (int, error) {
if d[len(d)-1] == '\n' {
d = d[:len(d)-1]
}
if a.prefix != "" {
l := a.prefix + ": " + string(d)
if testing.Verbose() {
fmt.Printf("testLoggerAdapter verbose: %s\n", l)
}
a.t.Log(l)
return len(l), nil
}
a.t.Log(string(d))
return len(d), nil
}
func newTestLogger(t *testing.T) *log.Logger {
return log.New(&testLoggerAdapter{t: t}, "", log.Lmicroseconds)
}
func newTestLoggerWithPrefix(t *testing.T, prefix string) *log.Logger {
return log.New(&testLoggerAdapter{t: t, prefix: prefix}, "", log.Lmicroseconds)
}
func newTestLeveledLogger(t *testing.T) hclog.Logger {
return hclog.New(&hclog.LoggerOptions{
Name: "",
Output: &testLoggerAdapter{t: t},
})
}
func newTestLeveledLoggerWithPrefix(t *testing.T, prefix string) hclog.Logger {
return hclog.New(&hclog.LoggerOptions{
Name: prefix,
Output: &testLoggerAdapter{t: t, prefix: prefix},
})
}
type cluster struct {
dirs []string
stores []*InmemStore
fsms []FSM
snaps []*FileSnapshotStore
trans []LoopbackTransport
rafts []*Raft
t *testing.T
observationCh chan Observation
conf *Config
propagateTimeout time.Duration
longstopTimeout time.Duration
logger *log.Logger
startTime time.Time
failedLock sync.Mutex
failedCh chan struct{}
failed bool
}
func (c *cluster) Merge(other *cluster) {
c.dirs = append(c.dirs, other.dirs...)
c.stores = append(c.stores, other.stores...)
c.fsms = append(c.fsms, other.fsms...)
c.snaps = append(c.snaps, other.snaps...)
c.trans = append(c.trans, other.trans...)
c.rafts = append(c.rafts, other.rafts...)
}
// notifyFailed will close the failed channel which can signal the goroutine
// running the test that another goroutine has detected a failure in order to
// terminate the test.
func (c *cluster) notifyFailed() {
c.failedLock.Lock()
defer c.failedLock.Unlock()
if !c.failed {
c.failed = true
close(c.failedCh)
}
}
// Failf provides a logging function that fails the tests, prints the output
// with microseconds, and does not mysteriously eat the string. This can be
// safely called from goroutines but won't immediately halt the test. The
// failedCh will be closed to allow blocking functions in the main thread to
// detect the failure and react. Note that you should arrange for the main
// thread to block until all goroutines have completed in order to reliably
// fail tests using this function.
func (c *cluster) Failf(format string, args ...interface{}) {
c.logger.Printf(format, args...)
c.t.Fail()
c.notifyFailed()
}
// FailNowf provides a logging function that fails the tests, prints the output
// with microseconds, and does not mysteriously eat the string. FailNowf must be
// called from the goroutine running the test or benchmark function, not from
// other goroutines created during the test. Calling FailNowf does not stop
// those other goroutines.
func (c *cluster) FailNowf(format string, args ...interface{}) {
c.logger.Printf(format, args...)
c.t.FailNow()
}
// Close shuts down the cluster and cleans up.
func (c *cluster) Close() {
var futures []Future
for _, r := range c.rafts {
futures = append(futures, r.Shutdown())
}
// Wait for shutdown
limit := time.AfterFunc(c.longstopTimeout, func() {
// We can't FailNowf here, and c.Failf won't do anything if we
// hang, so panic.
panic("timed out waiting for shutdown")
})
defer limit.Stop()
for _, f := range futures {
if err := f.Error(); err != nil {
c.FailNowf("[ERR] shutdown future err: %v", err)
}
}
for _, d := range c.dirs {
os.RemoveAll(d)
}
}
// WaitEventChan returns a channel which will signal if an observation is made
// or a timeout occurs. It is possible to set a filter to look for specific
// observations. Setting timeout to 0 means that it will wait forever until a
// non-filtered observation is made.
func (c *cluster) WaitEventChan(filter FilterFn, timeout time.Duration) <-chan struct{} {
ch := make(chan struct{})
go func() {
defer close(ch)
var timeoutCh <-chan time.Time
if timeout > 0 {
timeoutCh = time.After(timeout)
}
for {
select {
case <-timeoutCh:
return
case o, ok := <-c.observationCh:
if !ok || filter == nil || filter(&o) {
return
}
}
}
}()
return ch
}
// WaitEvent waits until an observation is made, a timeout occurs, or a test
// failure is signaled. It is possible to set a filter to look for specific
// observations. Setting timeout to 0 means that it will wait forever until a
// non-filtered observation is made or a test failure is signaled.
func (c *cluster) WaitEvent(filter FilterFn, timeout time.Duration) {
select {
case <-c.failedCh:
c.t.FailNow()
case <-c.WaitEventChan(filter, timeout):
}
}
// WaitForReplication blocks until every FSM in the cluster has the given
// length, or the long sanity check timeout expires.
func (c *cluster) WaitForReplication(fsmLength int) {
limitCh := time.After(c.longstopTimeout)
CHECK:
for {
ch := c.WaitEventChan(nil, c.conf.CommitTimeout)
select {
case <-c.failedCh:
c.t.FailNow()
case <-limitCh:
c.FailNowf("[ERR] Timeout waiting for replication")
case <-ch:
for _, fsmRaw := range c.fsms {
fsm := getMockFSM(fsmRaw)
fsm.Lock()
num := len(fsm.logs)
fsm.Unlock()
if num != fsmLength {
continue CHECK
}
}
return
}
}
}
// pollState takes a snapshot of the state of the cluster. This might not be
// stable, so use GetInState() to apply some additional checks when waiting
// for the cluster to achieve a particular state.
func (c *cluster) pollState(s RaftState) ([]*Raft, uint64) {
var highestTerm uint64
in := make([]*Raft, 0, 1)
for _, r := range c.rafts {
if r.State() == s {
in = append(in, r)
}
term := r.getCurrentTerm()
if term > highestTerm {
highestTerm = term
}
}
return in, highestTerm
}
// GetInState polls the state of the cluster and attempts to identify when it has
// settled into the given state.
func (c *cluster) GetInState(s RaftState) []*Raft {
c.logger.Printf("[INFO] Starting stability test for raft state: %+v", s)
limitCh := time.After(c.longstopTimeout)
// An election should complete after 2 * max(HeartbeatTimeout, ElectionTimeout)
// because of the randomised timer expiring in 1 x interval ... 2 x interval.
// We add a bit for propagation delay. If the election fails (e.g. because
// two elections start at once), we will have got something through our
// observer channel indicating a different state (i.e. one of the nodes
// will have moved to candidate state) which will reset the timer.
//
// Because of an implementation peculiarity, it can actually be 3 x timeout.
timeout := c.conf.HeartbeatTimeout
if timeout < c.conf.ElectionTimeout {
timeout = c.conf.ElectionTimeout
}
timeout = 2*timeout + c.conf.CommitTimeout
timer := time.NewTimer(timeout)
defer timer.Stop()
// Wait until we have a stable instate slice. Each time we see an
// observation a state has changed, recheck it and if it has changed,
// restart the timer.
var pollStartTime = time.Now()
for {
inState, highestTerm := c.pollState(s)
inStateTime := time.Now()
// Sometimes this routine is called very early on before the
// rafts have started up. We then timeout even though no one has
// even started an election. So if the highest term in use is
// zero, we know there are no raft processes that have yet issued
// a RequestVote, and we set a long time out. This is fixed when
// we hear the first RequestVote, at which point we reset the
// timer.
if highestTerm == 0 {
timer.Reset(c.longstopTimeout)
} else {
timer.Reset(timeout)
}
// Filter will wake up whenever we observe a RequestVote.
filter := func(ob *Observation) bool {
switch ob.Data.(type) {
case RaftState:
return true
case RequestVoteRequest:
return true
default:
return false
}
}
select {
case <-c.failedCh:
c.t.FailNow()
case <-limitCh:
c.FailNowf("[ERR] Timeout waiting for stable %s state", s)
case <-c.WaitEventChan(filter, 0):
c.logger.Printf("[DEBUG] Resetting stability timeout")
case t, ok := <-timer.C:
if !ok {
c.FailNowf("[ERR] Timer channel errored")
}
c.logger.Printf("[INFO] Stable state for %s reached at %s (%d nodes), %s from start of poll, %s from cluster start. Timeout at %s, %s after stability",
s, inStateTime, len(inState), inStateTime.Sub(pollStartTime), inStateTime.Sub(c.startTime), t, t.Sub(inStateTime))
return inState
}
}
}
// Leader waits for the cluster to elect a leader and stay in a stable state.
func (c *cluster) Leader() *Raft {
leaders := c.GetInState(Leader)
if len(leaders) != 1 {
c.FailNowf("[ERR] expected one leader: %v", leaders)
}
return leaders[0]
}
// Followers waits for the cluster to have N-1 followers and stay in a stable
// state.
func (c *cluster) Followers() []*Raft {
expFollowers := len(c.rafts) - 1
followers := c.GetInState(Follower)
if len(followers) != expFollowers {
c.FailNowf("[ERR] timeout waiting for %d followers (followers are %v)", expFollowers, followers)
}
return followers
}
// FullyConnect connects all the transports together.
func (c *cluster) FullyConnect() {
c.logger.Printf("[DEBUG] Fully Connecting")
for i, t1 := range c.trans {
for j, t2 := range c.trans {
if i != j {
t1.Connect(t2.LocalAddr(), t2)
t2.Connect(t1.LocalAddr(), t1)
}
}
}
}
// Disconnect disconnects all transports from the given address.
func (c *cluster) Disconnect(a ServerAddress) {
c.logger.Printf("[DEBUG] Disconnecting %v", a)
for _, t := range c.trans {
if t.LocalAddr() == a {
t.DisconnectAll()
} else {
t.Disconnect(a)
}
}
}
// Partition keeps the given list of addresses connected but isolates them
// from the other members of the cluster.
func (c *cluster) Partition(far []ServerAddress) {
c.logger.Printf("[DEBUG] Partitioning %v", far)
// Gather the set of nodes on the "near" side of the partition (we
// will call the supplied list of nodes the "far" side).
near := make(map[ServerAddress]struct{})
OUTER:
for _, t := range c.trans {
l := t.LocalAddr()
for _, a := range far {
if l == a {
continue OUTER
}
}
near[l] = struct{}{}
}
// Now fixup all the connections. The near side will be separated from
// the far side, and vice-versa.
for _, t := range c.trans {
l := t.LocalAddr()
if _, ok := near[l]; ok {
for _, a := range far {
t.Disconnect(a)
}
} else {
for a := range near {
t.Disconnect(a)
}
}
}
}
// IndexOf returns the index of the given raft instance.
func (c *cluster) IndexOf(r *Raft) int {
for i, n := range c.rafts {
if n == r {
return i
}
}
return -1
}
// EnsureLeader checks that ALL the nodes think the leader is the given expected
// leader.
func (c *cluster) EnsureLeader(t *testing.T, expect ServerAddress) {
// We assume c.Leader() has been called already; now check all the rafts
// think the leader is correct
fail := false
for _, r := range c.rafts {
leader := ServerAddress(r.Leader())
if leader != expect {
if leader == "" {
leader = "[none]"
}
if expect == "" {
c.logger.Printf("[ERR] Peer %s sees leader %v expected [none]", r, leader)
} else {
c.logger.Printf("[ERR] Peer %s sees leader %v expected %v", r, leader, expect)
}
fail = true
}
}
if fail {
c.FailNowf("[ERR] At least one peer has the wrong notion of leader")
}
}
// EnsureSame makes sure all the FSMs have the same contents.
func (c *cluster) EnsureSame(t *testing.T) {
limit := time.Now().Add(c.longstopTimeout)
first := getMockFSM(c.fsms[0])
CHECK:
first.Lock()
for i, fsmRaw := range c.fsms {
fsm := getMockFSM(fsmRaw)
if i == 0 {
continue
}
fsm.Lock()
if len(first.logs) != len(fsm.logs) {
fsm.Unlock()
if time.Now().After(limit) {
c.FailNowf("[ERR] FSM log length mismatch: %d %d",
len(first.logs), len(fsm.logs))
} else {
goto WAIT
}
}
for idx := 0; idx < len(first.logs); idx++ {
if !bytes.Equal(first.logs[idx], fsm.logs[idx]) {
fsm.Unlock()
if time.Now().After(limit) {
c.FailNowf("[ERR] FSM log mismatch at index %d", idx)
} else {
goto WAIT
}
}
}
if len(first.configurations) != len(fsm.configurations) {
fsm.Unlock()
if time.Now().After(limit) {
c.FailNowf("[ERR] FSM configuration length mismatch: %d %d",
len(first.logs), len(fsm.logs))
} else {
goto WAIT
}
}
for idx := 0; idx < len(first.configurations); idx++ {
if !reflect.DeepEqual(first.configurations[idx], fsm.configurations[idx]) {
fsm.Unlock()
if time.Now().After(limit) {
c.FailNowf("[ERR] FSM configuration mismatch at index %d: %v, %v", idx, first.configurations[idx], fsm.configurations[idx])
} else {
goto WAIT
}
}
}
fsm.Unlock()
}
first.Unlock()
return
WAIT:
first.Unlock()
c.WaitEvent(nil, c.conf.CommitTimeout)
goto CHECK
}
// getConfiguration returns the configuration of the given Raft instance, or
// fails the test if there's an error
func (c *cluster) getConfiguration(r *Raft) Configuration {
future := r.GetConfiguration()
if err := future.Error(); err != nil {
c.FailNowf("[ERR] failed to get configuration: %v", err)
return Configuration{}
}
return future.Configuration()
}
// EnsureSamePeers makes sure all the rafts have the same set of peers.
func (c *cluster) EnsureSamePeers(t *testing.T) {
limit := time.Now().Add(c.longstopTimeout)
peerSet := c.getConfiguration(c.rafts[0])
CHECK:
for i, raft := range c.rafts {
if i == 0 {
continue
}
otherSet := c.getConfiguration(raft)
if !reflect.DeepEqual(peerSet, otherSet) {
if time.Now().After(limit) {
c.FailNowf("[ERR] peer mismatch: %+v %+v", peerSet, otherSet)
} else {
goto WAIT
}
}
}
return
WAIT:
c.WaitEvent(nil, c.conf.CommitTimeout)
goto CHECK
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
type MakeClusterOpts struct {
Peers int
Bootstrap bool
Conf *Config
ConfigStoreFSM bool
MakeFSMFunc func() FSM
LongstopTimeout time.Duration
}
// makeCluster will return a cluster with the given config and number of peers.
// If bootstrap is true, the servers will know about each other before starting,
// otherwise their transports will be wired up but they won't yet have configured
// each other.
func makeCluster(t *testing.T, opts *MakeClusterOpts) *cluster {
if opts.Conf == nil {
opts.Conf = inmemConfig(t)
}
c := &cluster{
observationCh: make(chan Observation, 1024),
conf: opts.Conf,
// Propagation takes a maximum of 2 heartbeat timeouts (time to
// get a new heartbeat that would cause a commit) plus a bit.
propagateTimeout: opts.Conf.HeartbeatTimeout*2 + opts.Conf.CommitTimeout,
longstopTimeout: 5 * time.Second,
logger: newTestLoggerWithPrefix(t, "cluster"),
failedCh: make(chan struct{}),
}
if opts.LongstopTimeout > 0 {
c.longstopTimeout = opts.LongstopTimeout
}
c.t = t
var configuration Configuration
// Setup the stores and transports
for i := 0; i < opts.Peers; i++ {
dir, err := ioutil.TempDir("", "raft")
if err != nil {
c.FailNowf("[ERR] err: %v ", err)
}
store := NewInmemStore()
c.dirs = append(c.dirs, dir)
c.stores = append(c.stores, store)
if opts.ConfigStoreFSM {
c.fsms = append(c.fsms, &MockFSMConfigStore{
FSM: &MockFSM{},
})
} else {
var fsm FSM
if opts.MakeFSMFunc != nil {
fsm = opts.MakeFSMFunc()
} else {
fsm = &MockFSM{}
}
c.fsms = append(c.fsms, fsm)
}
dir2, snap := FileSnapTest(t)
c.dirs = append(c.dirs, dir2)
c.snaps = append(c.snaps, snap)
addr, trans := NewInmemTransport("")
c.trans = append(c.trans, trans)
localID := ServerID(fmt.Sprintf("server-%s", addr))
if opts.Conf.ProtocolVersion < 3 {
localID = ServerID(addr)
}
configuration.Servers = append(configuration.Servers, Server{
Suffrage: Voter,
ID: localID,
Address: addr,
})
}
// Wire the transports together
c.FullyConnect()
// Create all the rafts
c.startTime = time.Now()
for i := 0; i < opts.Peers; i++ {
// The InmemStore doubles as both the log store and the stable store.
logs := c.stores[i]
store := c.stores[i]
snap := c.snaps[i]
trans := c.trans[i]
peerConf := opts.Conf
peerConf.LocalID = configuration.Servers[i].ID
peerConf.Logger = newTestLeveledLoggerWithPrefix(t, string(configuration.Servers[i].ID))
if opts.Bootstrap {
err := BootstrapCluster(peerConf, logs, store, snap, trans, configuration)
if err != nil {
c.FailNowf("[ERR] BootstrapCluster failed: %v", err)
}
}
raft, err := NewRaft(peerConf, c.fsms[i], logs, store, snap, trans)
if err != nil {
c.FailNowf("[ERR] NewRaft failed: %v", err)
}
raft.RegisterObserver(NewObserver(c.observationCh, false, nil))
c.rafts = append(c.rafts, raft)
}
return c
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func MakeCluster(n int, t *testing.T, conf *Config) *cluster {
return makeCluster(t, &MakeClusterOpts{
Peers: n,
Bootstrap: true,
Conf: conf,
})
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func MakeClusterNoBootstrap(n int, t *testing.T, conf *Config) *cluster {
return makeCluster(t, &MakeClusterOpts{
Peers: n,
Conf: conf,
})
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func MakeClusterCustom(t *testing.T, opts *MakeClusterOpts) *cluster {
return makeCluster(t, opts)
}
// NOTE: This is exposed for middleware testing purposes and is not a stable API
func FileSnapTest(t *testing.T) (string, *FileSnapshotStore) {
// Create a test dir
dir, err := ioutil.TempDir("", "raft")
if err != nil {
t.Fatalf("err: %v ", err)
}
snap, err := NewFileSnapshotStoreWithLogger(dir, 3, newTestLogger(t))
if err != nil {
t.Fatalf("err: %v", err)
}
return dir, snap
}
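A quick sketch of how the helpers above fit together. This is a hypothetical test (the name TestClusterConverges is invented): cluster and its fields are unexported, so it only compiles inside this package, and it assumes the cluster Close method defined elsewhere in this file.

func TestClusterConverges(t *testing.T) {
	// Three bootstrapped peers, wired up by makeCluster/FullyConnect.
	c := MakeClusterCustom(t, &MakeClusterOpts{
		Peers:           3,
		Bootstrap:       true,
		LongstopTimeout: 10 * time.Second,
	})
	defer c.Close() // assumed: Close is defined earlier in this file

	// Poll, in the same retry style as the CHECK/WAIT loops above, until
	// every node reports identical membership.
	c.EnsureSamePeers(t)
}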

vendor/github.com/mitchellh/copystructure/.travis.yml (new file, generated, vendored)
@ -0,0 +1,12 @@
language: go
go:
  - 1.7
  - tip
script:
  - go test
matrix:
  allow_failures:
    - go: tip

vendor/github.com/mitchellh/copystructure/copystructure.go (vendored)
@ -1,28 +1,16 @@
package copystructure
import (
"errors"
"reflect"
"sync"
"github.com/mitchellh/reflectwalk"
)
// Copy returns a deep copy of v.
func Copy(v interface{}) (interface{}, error) {
w := new(walker)
err := reflectwalk.Walk(v, w)
if err != nil {
return nil, err
}
// Get the result. If the result is nil, then we want to turn it
// into a typed nil if we can.
result := w.Result
if result == nil {
val := reflect.ValueOf(v)
result = reflect.Indirect(reflect.New(val.Type())).Interface()
}
return result, nil
return Config{}.Copy(v)
}
// CopierFunc is a function that knows how to deep copy a specific type.
@ -40,6 +28,68 @@ type CopierFunc func(interface{}) (interface{}, error)
// this map as well as to Copy in a mutex.
var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
// Must is a helper that wraps a call to a function returning
// (interface{}, error) and panics if the error is non-nil. It is intended
// for use in variable initializations and should only be used when a copy
// error should be a crashing case.
func Must(v interface{}, err error) interface{} {
if err != nil {
panic("copy error: " + err.Error())
}
return v
}
var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
type Config struct {
// Lock any types that are a sync.Locker and are not a mutex while copying.
// If there is an RLocker method, use that to get the sync.Locker.
Lock bool
// Copiers is a map of types associated with a CopierFunc. Use the global
// Copiers map if this is nil.
Copiers map[reflect.Type]CopierFunc
}
func (c Config) Copy(v interface{}) (interface{}, error) {
if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
return nil, errPointerRequired
}
w := new(walker)
if c.Lock {
w.useLocks = true
}
if c.Copiers == nil {
c.Copiers = Copiers
}
err := reflectwalk.Walk(v, w)
if err != nil {
return nil, err
}
// Get the result. If the result is nil, then we want to turn it
// into a typed nil if we can.
result := w.Result
if result == nil {
val := reflect.ValueOf(v)
result = reflect.Indirect(reflect.New(val.Type())).Interface()
}
return result, nil
}
// Return the key used to index interface types we've seen. Store the number
// of pointers in the upper 32 bits, and the depth in the lower 32 bits. This is
// easy to calculate, easy to match a key with our current depth, and we don't
// need to deal with initializing and cleaning up nested maps or slices.
func ifaceKey(pointers, depth int) uint64 {
return uint64(pointers)<<32 | uint64(depth)
}
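// (Illustration, not part of the upstream diff: ifaceKey(2, 3) yields
// 0x0000000200000003, i.e. two pointers in the upper half and depth 3 in the
// lower half, so Exit can match every key for a given depth by masking off
// the low 32 bits.)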
type walker struct {
Result interface{}
@ -47,15 +97,55 @@ type walker struct {
ignoreDepth int
vals []reflect.Value
cs []reflect.Value
ps []bool
// This stores the number of pointers we've walked over, indexed by depth.
ps []int
// If an interface is indirected by a pointer, we need to know the type of
// interface to create when creating the new value. Store the interface
// types here, indexed by both the walk depth and the number of pointers
// already seen at that depth. Use ifaceKey to calculate the proper uint64
// value.
ifaceTypes map[uint64]reflect.Type
// any locks we've taken, indexed by depth
locks []sync.Locker
// take locks while walking the structure
useLocks bool
}
func (w *walker) Enter(l reflectwalk.Location) error {
w.depth++
// ensure we have enough elements to index via w.depth
for w.depth >= len(w.locks) {
w.locks = append(w.locks, nil)
}
for len(w.ps) < w.depth+1 {
w.ps = append(w.ps, 0)
}
return nil
}
func (w *walker) Exit(l reflectwalk.Location) error {
locker := w.locks[w.depth]
w.locks[w.depth] = nil
if locker != nil {
defer locker.Unlock()
}
// clear out pointers and interfaces as we exit the stack
w.ps[w.depth] = 0
for k := range w.ifaceTypes {
mask := uint64(^uint32(0))
if k&mask == uint64(w.depth) {
delete(w.ifaceTypes, k)
}
}
w.depth--
if w.ignoreDepth > w.depth {
w.ignoreDepth = 0
@ -66,9 +156,13 @@ func (w *walker) Exit(l reflectwalk.Location) error {
}
switch l {
case reflectwalk.Array:
fallthrough
case reflectwalk.Map:
fallthrough
case reflectwalk.Slice:
w.replacePointerMaybe()
// Pop map off our container
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.MapValue:
@ -76,13 +170,36 @@ func (w *walker) Exit(l reflectwalk.Location) error {
mv := w.valPop()
mk := w.valPop()
m := w.cs[len(w.cs)-1]
m.SetMapIndex(mk, mv)
// If mv is the zero value, SetMapIndex deletes the key from the map,
// or in this case never adds it. We need to create a properly typed
// zero value so that this key can be set.
if !mv.IsValid() {
mv = reflect.Zero(m.Elem().Type().Elem())
}
m.Elem().SetMapIndex(mk, mv)
case reflectwalk.ArrayElem:
// Pop off the value and the index and set it on the array
v := w.valPop()
i := w.valPop().Interface().(int)
if v.IsValid() {
a := w.cs[len(w.cs)-1]
ae := a.Elem().Index(i) // the array is stored on the stack as a pointer, so we need the Elem() call
if ae.CanSet() {
ae.Set(v)
}
}
case reflectwalk.SliceElem:
// Pop off the value and the index and set it on the slice
v := w.valPop()
i := w.valPop().Interface().(int)
s := w.cs[len(w.cs)-1]
s.Index(i).Set(v)
if v.IsValid() {
s := w.cs[len(w.cs)-1]
se := s.Elem().Index(i)
if se.CanSet() {
se.Set(v)
}
}
case reflectwalk.Struct:
w.replacePointerMaybe()
@ -95,6 +212,7 @@ func (w *walker) Exit(l reflectwalk.Location) error {
if v.IsValid() {
s := w.cs[len(w.cs)-1]
sf := reflect.Indirect(s).FieldByName(f.Name)
if sf.CanSet() {
sf.Set(v)
}
@ -112,13 +230,14 @@ func (w *walker) Map(m reflect.Value) error {
if w.ignoring() {
return nil
}
w.lock(m)
// Create the map. If the map itself is nil, then just make a nil map
var newMap reflect.Value
if m.IsNil() {
newMap = reflect.Indirect(reflect.New(m.Type()))
newMap = reflect.New(m.Type())
} else {
newMap = reflect.MakeMap(m.Type())
newMap = wrapPtr(reflect.MakeMap(m.Type()))
}
w.cs = append(w.cs, newMap)
@ -131,20 +250,28 @@ func (w *walker) MapElem(m, k, v reflect.Value) error {
}
func (w *walker) PointerEnter(v bool) error {
if w.ignoring() {
return nil
if v {
w.ps[w.depth]++
}
w.ps = append(w.ps, v)
return nil
}
func (w *walker) PointerExit(bool) error {
if w.ignoring() {
func (w *walker) PointerExit(v bool) error {
if v {
w.ps[w.depth]--
}
return nil
}
func (w *walker) Interface(v reflect.Value) error {
if !v.IsValid() {
return nil
}
if w.ifaceTypes == nil {
w.ifaceTypes = make(map[uint64]reflect.Type)
}
w.ps = w.ps[:len(w.ps)-1]
w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
return nil
}
@ -152,13 +279,14 @@ func (w *walker) Primitive(v reflect.Value) error {
if w.ignoring() {
return nil
}
w.lock(v)
// IsValid verifies that v is non-zero and CanInterface verifies
// that we're allowed to read this value (unexported fields).
var newV reflect.Value
if v.IsValid() && v.CanInterface() {
newV = reflect.New(v.Type())
reflect.Indirect(newV).Set(v)
newV.Elem().Set(v)
}
w.valPush(newV)
@ -170,12 +298,13 @@ func (w *walker) Slice(s reflect.Value) error {
if w.ignoring() {
return nil
}
w.lock(s)
var newS reflect.Value
if s.IsNil() {
newS = reflect.Indirect(reflect.New(s.Type()))
newS = reflect.New(s.Type())
} else {
newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap())
newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
}
w.cs = append(w.cs, newS)
@ -195,10 +324,36 @@ func (w *walker) SliceElem(i int, elem reflect.Value) error {
return nil
}
func (w *walker) Array(a reflect.Value) error {
if w.ignoring() {
return nil
}
w.lock(a)
newA := reflect.New(a.Type())
w.cs = append(w.cs, newA)
w.valPush(newA)
return nil
}
func (w *walker) ArrayElem(i int, elem reflect.Value) error {
if w.ignoring() {
return nil
}
// We don't write the array here because elem might still be
// arbitrarily complex. Just record the index and continue on.
w.valPush(reflect.ValueOf(i))
return nil
}
func (w *walker) Struct(s reflect.Value) error {
if w.ignoring() {
return nil
}
w.lock(s)
var v reflect.Value
if c, ok := Copiers[s.Type()]; ok {
@ -211,7 +366,10 @@ func (w *walker) Struct(s reflect.Value) error {
return err
}
v = reflect.ValueOf(dup)
// We need to put a pointer to the value on the value stack,
// so allocate a new pointer and set it.
v = reflect.New(s.Type())
reflect.Indirect(v).Set(reflect.ValueOf(dup))
} else {
// No copier, we copy ourselves and allow reflectwalk to guide
// us deeper into the structure for copying.
@ -232,18 +390,29 @@ func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
return nil
}
// If PkgPath is non-empty, this is a private (unexported) field.
// We skip it, since the Go runtime would not let us set it anyway.
if f.PkgPath != "" {
return reflectwalk.SkipEntry
}
// Push the field onto the stack, we'll handle it when we exit
// the struct field in Exit...
w.valPush(reflect.ValueOf(f))
return nil
}
// ignore causes the walker to ignore any more values until we exit this one
func (w *walker) ignore() {
w.ignoreDepth = w.depth
}
func (w *walker) ignoring() bool {
return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
}
func (w *walker) pointerPeek() bool {
return w.ps[len(w.ps)-1]
return w.ps[w.depth] > 0
}
func (w *walker) valPop() reflect.Value {
@ -275,5 +444,105 @@ func (w *walker) replacePointerMaybe() {
// we need to push that onto the stack.
if !w.pointerPeek() {
w.valPush(reflect.Indirect(w.valPop()))
return
}
v := w.valPop()
// If the expected type is a pointer to an interface of any depth,
// such as *interface{}, **interface{}, etc., then we need to convert
// the value "v" from *CONCRETE to *interface{} so types match for
// Set.
//
// For example, if v is of type *Foo where Foo is a struct, v would become
// *interface{} instead. This only happens if we have an interface expectation
// at this depth.
//
// For more info, see GH-16
if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
y := reflect.New(iType) // Create *interface{}
y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
v = y // v is now typed *interface{} (where *v = Foo)
}
for i := 1; i < w.ps[w.depth]; i++ {
if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
iface := reflect.New(iType).Elem()
iface.Set(v)
v = iface
}
p := reflect.New(v.Type())
p.Elem().Set(v)
v = p
}
w.valPush(v)
}
// if this value is a Locker, lock it and add it to the locks slice
func (w *walker) lock(v reflect.Value) {
if !w.useLocks {
return
}
if !v.IsValid() || !v.CanInterface() {
return
}
type rlocker interface {
RLocker() sync.Locker
}
var locker sync.Locker
// We can't call Interface() on a value directly, since that requires
// a copy. This is OK, since the pointer to a value which is a sync.Locker
// is also a sync.Locker.
if v.Kind() == reflect.Ptr {
switch l := v.Interface().(type) {
case rlocker:
// don't lock a mutex directly
if _, ok := l.(*sync.RWMutex); !ok {
locker = l.RLocker()
}
case sync.Locker:
locker = l
}
} else if v.CanAddr() {
switch l := v.Addr().Interface().(type) {
case rlocker:
// don't lock a mutex directly
if _, ok := l.(*sync.RWMutex); !ok {
locker = l.RLocker()
}
case sync.Locker:
locker = l
}
}
// still no callable locker
if locker == nil {
return
}
// don't lock a mutex directly
switch locker.(type) {
case *sync.Mutex, *sync.RWMutex:
return
}
locker.Lock()
w.locks[w.depth] = locker
}
// wrapPtr is a helper that takes v and always makes it *v. copystructure
// stores things internally as pointers until the last moment before unwrapping.
func wrapPtr(v reflect.Value) reflect.Value {
if !v.IsValid() {
return v
}
vPtr := reflect.New(v.Type())
vPtr.Elem().Set(v)
return vPtr
}
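Putting the new surface together, a minimal usage sketch. Everything here except Copy, Must and Config is invented for illustration; the Conf type embeds sync.Mutex so the Lock option has something to hold while walking.

package main

import (
	"fmt"
	"sync"

	"github.com/mitchellh/copystructure"
)

// Conf embeds sync.Mutex so that Config{Lock: true} locks the value
// for the duration of the copy.
type Conf struct {
	sync.Mutex
	Hosts []string
}

func main() {
	orig := &Conf{Hosts: []string{"a", "b"}}

	// Plain deep copy; Must panics if the copy fails.
	dup := copystructure.Must(copystructure.Copy(orig)).(*Conf)
	dup.Hosts[0] = "c" // does not touch orig

	// Lock-aware copy; requires a pointer argument (see errPointerRequired).
	locked, err := copystructure.Config{Lock: true}.Copy(orig)
	if err != nil {
		panic(err)
	}

	fmt.Println(orig.Hosts, dup.Hosts, locked.(*Conf).Hosts)
	// [a b] [c b] [a b]
}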

vendor/github.com/mitchellh/copystructure/go.mod (new file, generated, vendored)
@ -0,0 +1,3 @@
module github.com/mitchellh/copystructure
require github.com/mitchellh/reflectwalk v1.0.0

vendor/github.com/mitchellh/copystructure/go.sum (new file, generated, vendored)
@ -0,0 +1,2 @@
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=

vendor/golang.org/x/sys/unix/mkpost.go (vendored)
@ -42,6 +42,13 @@ func main() {
log.Fatal(err)
}
if goos == "aix" {
// Replace type of Atim, Mtim and Ctim by Timespec in Stat_t
// to avoid having both StTimespec and Timespec.
sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
b = sttimespec.ReplaceAll(b, []byte("Timespec"))
}
// Intentionally export __val fields in Fsid and Sigset_t
valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`)
b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}"))
@ -96,6 +103,15 @@ func main() {
cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
// Rename Stat_t time fields
if goos == "freebsd" && goarch == "386" {
// Hide Stat_t.[AMCB]tim_ext fields
renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
}
renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
// gofmt
b, err = format.Source(b)
if err != nil {
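As a sanity check on the rename rule added above, here is a small hypothetical standalone run of the same regular expression over a few field declarations. The input lines are invented; the pattern and replacement are the ones from the diff.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	in := []byte("Atimespec Timespec\nBirthtimespec Timespec\nCtim StTimespec\n")
	re := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
	// Collapses Atimespec/Birthtimespec-style names to Atim/Btim while
	// leaving the type column (and already-renamed fields) untouched.
	fmt.Printf("%s", re.ReplaceAll(in, []byte("${1}tim ${2}")))
	// Output:
	// Atim Timespec
	// Btim Timespec
	// Ctim StTimespec
}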

vendor/golang.org/x/sys/unix/syscall_aix.go (vendored)
@ -454,8 +454,8 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sys Dup2(oldfd int, newfd int) (err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = fstatat
//sys fstat(fd int, stat *Stat_t) (err error)
//sys fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = fstatat
//sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int)
@ -464,7 +464,7 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sysnb Getuid() (uid int)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Listen(s int, n int) (err error)
//sys Lstat(path string, stat *Stat_t) (err error)
//sys lstat(path string, stat *Stat_t) (err error)
//sys Pause() (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = pread64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64
@ -474,7 +474,7 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Stat(path string, stat *Stat_t) (err error)
//sys stat(path string, statptr *Stat_t) (err error)
//sys Statfs(path string, buf *Statfs_t) (err error)
//sys Truncate(path string, length int64) (err error)

vendor/golang.org/x/sys/unix/syscall_aix_ppc.go (vendored)
@ -32,3 +32,19 @@ func (msghdr *Msghdr) SetControllen(length int) {
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
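// (Note, not part of the upstream diff: on ppc the generated fstat/fstatat/
// lstat/stat wrappers need no post-processing, so the exported functions
// below simply forward to them; the ppc64 variant of this file instead
// fixes up the Nsec fields after each call.)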
func Fstat(fd int, stat *Stat_t) error {
return fstat(fd, stat)
}
func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error {
return fstatat(dirfd, path, stat, flags)
}
func Lstat(path string, stat *Stat_t) error {
return lstat(path, stat)
}
func Stat(path string, statptr *Stat_t) error {
return stat(path, statptr)
}

vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go (vendored)
@ -32,3 +32,50 @@ func (msghdr *Msghdr) SetControllen(length int) {
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
// In order to have only the Timespec structure, the type of Stat_t's
// fields Atim, Mtim and Ctim is changed from StTimespec to Timespec
// during ztypes generation.
// On ppc64, Timespec.Nsec is an int64 while StTimespec.Nsec is an
// int32, so the fields' values must be adjusted.
func fixStatTimFields(stat *Stat_t) {
stat.Atim.Nsec >>= 32
stat.Mtim.Nsec >>= 32
stat.Ctim.Nsec >>= 32
}
func Fstat(fd int, stat *Stat_t) error {
err := fstat(fd, stat)
if err != nil {
return err
}
fixStatTimFields(stat)
return nil
}
func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error {
err := fstatat(dirfd, path, stat, flags)
if err != nil {
return err
}
fixStatTimFields(stat)
return nil
}
func Lstat(path string, stat *Stat_t) error {
err := lstat(path, stat)
if err != nil {
return err
}
fixStatTimFields(stat)
return nil
}
func Stat(path string, statptr *Stat_t) error {
err := stat(path, statptr)
if err != nil {
return err
}
fixStatTimFields(statptr)
return nil
}
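Why the 32-bit shift recovers the value: AIX on ppc64 is big-endian, so the kernel's 32-bit Nsec, followed by four bytes of padding, lands in the upper half of the int64 field that ztypes now declares. A small hypothetical illustration:

package main

import "fmt"

func main() {
	// The kernel wrote a 32-bit nanosecond count into the struct.
	const nsec = int32(123456789)

	// On a big-endian machine, reading those 4 bytes plus the 4 bytes of
	// trailing padding as one int64 leaves the value in the upper half.
	raw := int64(nsec) << 32

	// The >>= 32 in fixStatTimFields undoes exactly that.
	fmt.Println(raw>>32 == int64(nsec)) // true
}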

vendor/golang.org/x/sys/unix/syscall_freebsd.go (vendored)
@ -404,22 +404,22 @@ func roundup(x, y int) int {
func (s *Stat_t) convertFrom(old *stat_freebsd11_t) {
*s = Stat_t{
Dev: uint64(old.Dev),
Ino: uint64(old.Ino),
Nlink: uint64(old.Nlink),
Mode: old.Mode,
Uid: old.Uid,
Gid: old.Gid,
Rdev: uint64(old.Rdev),
Atim: old.Atim,
Mtim: old.Mtim,
Ctim: old.Ctim,
Birthtim: old.Birthtim,
Size: old.Size,
Blocks: old.Blocks,
Blksize: old.Blksize,
Flags: old.Flags,
Gen: uint64(old.Gen),
Dev: uint64(old.Dev),
Ino: uint64(old.Ino),
Nlink: uint64(old.Nlink),
Mode: old.Mode,
Uid: old.Uid,
Gid: old.Gid,
Rdev: uint64(old.Rdev),
Atim: old.Atim,
Mtim: old.Mtim,
Ctim: old.Ctim,
Btim: old.Btim,
Size: old.Size,
Blocks: old.Blocks,
Blksize: old.Blksize,
Flags: old.Flags,
Gen: uint64(old.Gen),
}
}

vendor/golang.org/x/sys/unix/types_aix.go (vendored)
@ -87,8 +87,6 @@ type Mode_t C.mode_t
type Timespec C.struct_timespec
type StTimespec C.struct_st_timespec
type Timeval C.struct_timeval
type Timeval32 C.struct_timeval32
@ -133,6 +131,8 @@ type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
@ -156,17 +156,18 @@ type Linger C.struct_linger
type Msghdr C.struct_msghdr
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Routing and interface messages

vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go (generated, vendored)
@ -859,7 +859,7 @@ func Fchown(fd int, uid int, gid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
func fstat(fd int, stat *Stat_t) (err error) {
r0, er := C.fstat(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(stat))))
if r0 == -1 && er != nil {
err = er
@ -869,7 +869,7 @@ func Fstat(fd int, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) {
func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.fstatat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))), C.int(flags))
if r0 == -1 && er != nil {
@ -953,7 +953,7 @@ func Listen(s int, n int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
func lstat(path string, stat *Stat_t) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.lstat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))))
if r0 == -1 && er != nil {
@ -1071,9 +1071,9 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Stat(path string, stat *Stat_t) (err error) {
func stat(path string, statptr *Stat_t) (err error) {
_p0 := uintptr(unsafe.Pointer(C.CString(path)))
r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))))
r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(statptr))))
if r0 == -1 && er != nil {
err = er
}

vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go (generated, vendored)
@ -803,7 +803,7 @@ func Fchown(fd int, uid int, gid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
func fstat(fd int, stat *Stat_t) (err error) {
_, e1 := callfstat(fd, uintptr(unsafe.Pointer(stat)))
if e1 != 0 {
err = errnoErr(e1)
@ -813,7 +813,7 @@ func Fstat(fd int, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) {
func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@ -905,7 +905,7 @@ func Listen(s int, n int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
func lstat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
@ -1023,13 +1023,13 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Stat(path string, stat *Stat_t) (err error) {
func stat(path string, statptr *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)))
_, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statptr)))
if e1 != 0 {
err = errnoErr(e1)
}

vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go (generated, vendored)
@ -941,8 +941,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) {
r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, stat, 0, 0, 0, 0)
func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) {
r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, statptr, 0, 0, 0, 0)
return
}

vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go (generated, vendored)
@ -783,8 +783,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) {
r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(stat)))
func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) {
r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(statptr)))
e1 = syscall.GetErrno()
return
}

vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go (generated, vendored)
@ -30,11 +30,6 @@ type Timespec struct {
Nsec int32
}
type StTimespec struct {
Sec int32
Nsec int32
}
type Timeval struct {
Sec int32
Usec int32
@ -101,9 +96,9 @@ type Stat_t struct {
Gid uint32
Rdev uint32
Size int32
Atim StTimespec
Mtim StTimespec
Ctim StTimespec
Atim Timespec
Mtim Timespec
Ctim Timespec
Blksize int32
Blocks int32
Vfstype int32
@ -148,6 +143,17 @@ type RawSockaddrUnix struct {
Path [1023]uint8
}
type RawSockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [120]uint8
}
type RawSockaddr struct {
Len uint8
Family uint8
@ -207,17 +213,18 @@ type Msghdr struct {
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x404
SizeofSockaddrUnix = 0x401
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPv6Mreq = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofMsghdr = 0x1c
SizeofCmsghdr = 0xc
SizeofICMPv6Filter = 0x20
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x404
SizeofSockaddrUnix = 0x401
SizeofSockaddrDatalink = 0x80
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPv6Mreq = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofMsghdr = 0x1c
SizeofCmsghdr = 0xc
SizeofICMPv6Filter = 0x20
)
const (

vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go (generated, vendored)
@ -30,12 +30,6 @@ type Timespec struct {
Nsec int64
}
type StTimespec struct {
Sec int64
Nsec int32
_ [4]byte
}
type Timeval struct {
Sec int64
Usec int32
@ -103,9 +97,9 @@ type Stat_t struct {
Gid uint32
Rdev uint64
Ssize int32
Atim StTimespec
Mtim StTimespec
Ctim StTimespec
Atim Timespec
Mtim Timespec
Ctim Timespec
Blksize int64
Blocks int64
Vfstype int32
@ -153,6 +147,17 @@ type RawSockaddrUnix struct {
Path [1023]uint8
}
type RawSockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [120]uint8
}
type RawSockaddr struct {
Len uint8
Family uint8
@ -212,17 +217,18 @@ type Msghdr struct {
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x404
SizeofSockaddrUnix = 0x401
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPv6Mreq = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofMsghdr = 0x30
SizeofCmsghdr = 0xc
SizeofICMPv6Filter = 0x20
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x404
SizeofSockaddrUnix = 0x401
SizeofSockaddrDatalink = 0x80
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPv6Mreq = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofMsghdr = 0x30
SizeofCmsghdr = 0xc
SizeofICMPv6Filter = 0x20
)
const (

@ -59,24 +59,24 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
Dev int32
Mode uint16
Nlink uint16
Ino uint64
Uid uint32
Gid uint32
Rdev int32
Atimespec Timespec
Mtimespec Timespec
Ctimespec Timespec
Birthtimespec Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Qspare [2]int64
Dev int32
Mode uint16
Nlink uint16
Ino uint64
Uid uint32
Gid uint32
Rdev int32
Atim Timespec
Mtim Timespec
Ctim Timespec
Btim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Qspare [2]int64
}
type Statfs_t struct {

@ -63,25 +63,25 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
Dev int32
Mode uint16
Nlink uint16
Ino uint64
Uid uint32
Gid uint32
Rdev int32
_ [4]byte
Atimespec Timespec
Mtimespec Timespec
Ctimespec Timespec
Birthtimespec Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Qspare [2]int64
Dev int32
Mode uint16
Nlink uint16
Ino uint64
Uid uint32
Gid uint32
Rdev int32
_ [4]byte
Atim Timespec
Mtim Timespec
Ctim Timespec
Btim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Qspare [2]int64
}
type Statfs_t struct {

@ -60,24 +60,24 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
Dev int32
Mode uint16
Nlink uint16
Ino uint64
Uid uint32
Gid uint32
Rdev int32
Atimespec Timespec
Mtimespec Timespec
Ctimespec Timespec
Birthtimespec Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Qspare [2]int64
Dev int32
Mode uint16
Nlink uint16
Ino uint64
Uid uint32
Gid uint32
Rdev int32
Atim Timespec
Mtim Timespec
Ctim Timespec
Btim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Qspare [2]int64
}
type Statfs_t struct {

@ -63,25 +63,25 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
Dev int32
Mode uint16
Nlink uint16
Ino uint64
Uid uint32
Gid uint32
Rdev int32
_ [4]byte
Atimespec Timespec
Mtimespec Timespec
Ctimespec Timespec
Birthtimespec Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Qspare [2]int64
Dev int32
Mode uint16
Nlink uint16
Ino uint64
Uid uint32
Gid uint32
Rdev int32
_ [4]byte
Atim Timespec
Mtim Timespec
Ctim Timespec
Btim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Qspare [2]int64
}
type Statfs_t struct {

vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go (generated, vendored)
@ -57,25 +57,25 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
Ino uint64
Nlink uint32
Dev uint32
Mode uint16
Padding1 uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize uint32
Flags uint32
Gen uint32
Lspare int32
Qspare1 int64
Qspare2 int64
Ino uint64
Nlink uint32
Dev uint32
Mode uint16
_1 uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize uint32
Flags uint32
Gen uint32
Lspare int32
Qspare1 int64
Qspare2 int64
}
type Statfs_t struct {

vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go (generated, vendored)
@ -62,50 +62,50 @@ const (
)
type Stat_t struct {
Dev uint64
Ino uint64
Nlink uint64
Mode uint16
_0 int16
Uid uint32
Gid uint32
_1 int32
Rdev uint64
Atim_ext int32
Atim Timespec
Mtim_ext int32
Mtim Timespec
Ctim_ext int32
Ctim Timespec
Btim_ext int32
Birthtim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint64
Spare [10]uint64
Dev uint64
Ino uint64
Nlink uint64
Mode uint16
_0 int16
Uid uint32
Gid uint32
_1 int32
Rdev uint64
_ int32
Atim Timespec
_ int32
Mtim Timespec
_ int32
Ctim Timespec
_ int32
Btim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint64
Spare [10]uint64
}
type stat_freebsd11_t struct {
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Birthtim Timespec
_ [8]byte
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Btim Timespec
_ [8]byte
}
type Statfs_t struct {

@ -62,45 +62,45 @@ const (
)
type Stat_t struct {
Dev uint64
Ino uint64
Nlink uint64
Mode uint16
_0 int16
Uid uint32
Gid uint32
_1 int32
Rdev uint64
Atim Timespec
Mtim Timespec
Ctim Timespec
Birthtim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint64
Spare [10]uint64
Dev uint64
Ino uint64
Nlink uint64
Mode uint16
_0 int16
Uid uint32
Gid uint32
_1 int32
Rdev uint64
Atim Timespec
Mtim Timespec
Ctim Timespec
Btim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint64
Spare [10]uint64
}
type stat_freebsd11_t struct {
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Birthtim Timespec
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Btim Timespec
}
type Statfs_t struct {

@ -64,45 +64,45 @@ const (
)
type Stat_t struct {
Dev uint64
Ino uint64
Nlink uint64
Mode uint16
_0 int16
Uid uint32
Gid uint32
_1 int32
Rdev uint64
Atim Timespec
Mtim Timespec
Ctim Timespec
Birthtim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint64
Spare [10]uint64
Dev uint64
Ino uint64
Nlink uint64
Mode uint16
_0 int16
Uid uint32
Gid uint32
_1 int32
Rdev uint64
Atim Timespec
Mtim Timespec
Ctim Timespec
Btim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint64
Spare [10]uint64
}
type stat_freebsd11_t struct {
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Birthtim Timespec
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Btim Timespec
}
type Statfs_t struct {

@ -62,45 +62,45 @@ const (
)
type Stat_t struct {
Dev uint64
Ino uint64
Nlink uint64
Mode uint16
_0 int16
Uid uint32
Gid uint32
_1 int32
Rdev uint64
Atim Timespec
Mtim Timespec
Ctim Timespec
Birthtim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint64
Spare [10]uint64
Dev uint64
Ino uint64
Nlink uint64
Mode uint16
_0 int16
Uid uint32
Gid uint32
_1 int32
Rdev uint64
Atim Timespec
Mtim Timespec
Ctim Timespec
Btim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint64
Spare [10]uint64
}
type stat_freebsd11_t struct {
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Birthtim Timespec
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Btim Timespec
}
type Statfs_t struct {

@ -57,23 +57,23 @@ type Rlimit struct {
type _Gid_t uint32
type Stat_t struct {
Dev uint64
Mode uint32
Ino uint64
Nlink uint32
Uid uint32
Gid uint32
Rdev uint64
Atimespec Timespec
Mtimespec Timespec
Ctimespec Timespec
Birthtimespec Timespec
Size int64
Blocks int64
Blksize uint32
Flags uint32
Gen uint32
Spare [2]uint32
Dev uint64
Mode uint32
Ino uint64
Nlink uint32
Uid uint32
Gid uint32
Rdev uint64
Atim Timespec
Mtim Timespec
Ctim Timespec
Btim Timespec
Size int64
Blocks int64
Blksize uint32
Flags uint32
Gen uint32
Spare [2]uint32
}
type Statfs_t [0]byte

Some files were not shown because too many files have changed in this diff.