Merge pull request #3403 from hashicorp/raft_peers_fixes
This fixes #1580
Commit: 9ef2156195
@@ -238,9 +238,7 @@ func (s *Server) getLeader() (bool, *metadata.Server) {
     }

     // Lookup the server
-    s.localLock.RLock()
-    server := s.localConsuls[leader]
-    s.localLock.RUnlock()
+    server := s.serverLookup.Server(leader)

     // Server could be nil
     return false, server
@@ -125,18 +125,14 @@ func (s *Server) localEvent(event serf.UserEvent) {
 // lanNodeJoin is used to handle join events on the LAN pool.
 func (s *Server) lanNodeJoin(me serf.MemberEvent) {
     for _, m := range me.Members {
-        ok, parts := metadata.IsConsulServer(m)
+        ok, serverMeta := metadata.IsConsulServer(m)
         if !ok {
             continue
         }
-        s.logger.Printf("[INFO] consul: Adding LAN server %s", parts)
+        s.logger.Printf("[INFO] consul: Adding LAN server %s", serverMeta)

-        // See if it's configured as part of our DC.
-        if parts.Datacenter == s.config.Datacenter {
-            s.localLock.Lock()
-            s.localConsuls[raft.ServerAddress(parts.Addr.String())] = parts
-            s.localLock.Unlock()
-        }
+        // Update server lookup
+        s.serverLookup.AddServer(serverMeta)

         // If we're still expecting to bootstrap, may need to handle this.
         if s.config.BootstrapExpect != 0 {
@@ -265,14 +261,13 @@ func (s *Server) maybeBootstrap() {
 // lanNodeFailed is used to handle fail events on the LAN pool.
 func (s *Server) lanNodeFailed(me serf.MemberEvent) {
     for _, m := range me.Members {
-        ok, parts := metadata.IsConsulServer(m)
+        ok, serverMeta := metadata.IsConsulServer(m)
         if !ok {
             continue
         }
-        s.logger.Printf("[INFO] consul: Removing LAN server %s", parts)
+        s.logger.Printf("[INFO] consul: Removing LAN server %s", serverMeta)

-        s.localLock.Lock()
-        delete(s.localConsuls, raft.ServerAddress(parts.Addr.String()))
-        s.localLock.Unlock()
+        // Update id to address map
+        s.serverLookup.RemoveServer(serverMeta)
     }
 }
@@ -123,11 +123,6 @@ type Server struct {
     // strong consistency.
     fsm *consulFSM

-    // localConsuls is used to track the known consuls
-    // in the local datacenter. Used to do leader forwarding.
-    localConsuls map[raft.ServerAddress]*metadata.Server
-    localLock    sync.RWMutex
-
     // Logger uses the provided LogOutput
     logger *log.Logger

@@ -171,6 +166,10 @@ type Server struct {
     // which SHOULD only consist of Consul servers
     serfWAN *serf.Serf

+    // serverLookup tracks server consuls in the local datacenter.
+    // Used to do leader forwarding and provide fast lookup by server id and address.
+    serverLookup *ServerLookup
+
     // floodLock controls access to floodCh.
     floodLock sync.RWMutex
     floodCh   []chan struct{}
@@ -295,7 +294,6 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store) (*
     connPool:     connPool,
     eventChLAN:   make(chan serf.Event, 256),
     eventChWAN:   make(chan serf.Event, 256),
-    localConsuls: make(map[raft.ServerAddress]*metadata.Server),
     logger:       logger,
     reconcileCh:  make(chan serf.Member, 32),
     router:       router.NewRouter(logger, config.Datacenter),
@@ -304,6 +302,7 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store) (*
     reassertLeaderCh: make(chan chan error),
     sessionTimers:    NewSessionTimers(),
     tombstoneGC:      gc,
+    serverLookup:     NewServerLookup(),
     shutdownCh:       shutdownCh,
 }

@@ -494,7 +493,14 @@ func (s *Server) setupRaft() error {
     }

     // Create a transport layer.
-    trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
+    transConfig := &raft.NetworkTransportConfig{
+        Stream:                s.raftLayer,
+        MaxPool:               3,
+        Timeout:               10 * time.Second,
+        ServerAddressProvider: s.serverLookup,
+    }
+
+    trans := raft.NewNetworkTransportWithConfig(transConfig)
     s.raftTransport = trans

     // Make sure we set the LogOutput.
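For readers outside the Consul codebase, the following is a minimal standalone sketch of the same wiring. It relies only on APIs visible in this diff (NetworkTransportConfig, ServerAddressProvider, NewTCPTransportWithConfig); the staticProvider type, the server id, and the addresses are illustrative assumptions, not part of this change.

package main

import (
    "fmt"
    "log"
    "os"
    "time"

    "github.com/hashicorp/raft"
)

// staticProvider is an illustrative ServerAddressProvider backed by a fixed map.
// Consul's real provider is the ServerLookup type added by this PR.
type staticProvider struct {
    addrs map[raft.ServerID]raft.ServerAddress
}

func (p *staticProvider) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) {
    if addr, ok := p.addrs[id]; ok {
        return addr, nil
    }
    return "", fmt.Errorf("no address known for server id %v", id)
}

func main() {
    provider := &staticProvider{addrs: map[raft.ServerID]raft.ServerAddress{
        "node-a": "10.0.0.1:8300", // illustrative id and address
    }}

    config := &raft.NetworkTransportConfig{
        MaxPool:               3,
        Timeout:               10 * time.Second,
        Logger:                log.New(os.Stderr, "", log.LstdFlags),
        ServerAddressProvider: provider,
    }

    // NewTCPTransportWithConfig (added in the vendored raft below) creates the
    // TCP stream layer itself and assigns it to config.Stream.
    trans, err := raft.NewTCPTransportWithConfig("127.0.0.1:0", nil, config)
    if err != nil {
        log.Fatal(err)
    }
    defer trans.Close()

    fmt.Println("raft transport listening on", trans.LocalAddr())
}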
@@ -694,11 +700,9 @@ func (s *Server) setupRPC(tlsWrap tlsutil.DCWrapper) error {
         return true
     }

-    s.localLock.RLock()
-    server, ok := s.localConsuls[address]
-    s.localLock.RUnlock()
-
-    if !ok {
+    server := s.serverLookup.Server(address)
+
+    if server == nil {
         return false
     }

agent/consul/server_lookup.go (new file)
@@ -0,0 +1,66 @@

package consul

import (
    "fmt"
    "sync"

    "github.com/hashicorp/consul/agent/metadata"
    "github.com/hashicorp/raft"
)

// ServerLookup encapsulates looking up servers by id and address
type ServerLookup struct {
    lock            sync.RWMutex
    addressToServer map[raft.ServerAddress]*metadata.Server
    idToServer      map[raft.ServerID]*metadata.Server
}

func NewServerLookup() *ServerLookup {
    return &ServerLookup{
        addressToServer: make(map[raft.ServerAddress]*metadata.Server),
        idToServer:      make(map[raft.ServerID]*metadata.Server),
    }
}

func (sl *ServerLookup) AddServer(server *metadata.Server) {
    sl.lock.Lock()
    defer sl.lock.Unlock()
    sl.addressToServer[raft.ServerAddress(server.Addr.String())] = server
    sl.idToServer[raft.ServerID(server.ID)] = server
}

func (sl *ServerLookup) RemoveServer(server *metadata.Server) {
    sl.lock.Lock()
    defer sl.lock.Unlock()
    delete(sl.addressToServer, raft.ServerAddress(server.Addr.String()))
    delete(sl.idToServer, raft.ServerID(server.ID))
}

// ServerAddr implements the raft ServerAddressProvider interface.
func (sl *ServerLookup) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) {
    sl.lock.RLock()
    defer sl.lock.RUnlock()
    svr, ok := sl.idToServer[id]
    if !ok {
        return "", fmt.Errorf("Could not find address for server id %v", id)
    }
    return raft.ServerAddress(svr.Addr.String()), nil
}

// Server looks up the server by address; it returns nil if the server is not found.
func (sl *ServerLookup) Server(addr raft.ServerAddress) *metadata.Server {
    sl.lock.RLock()
    defer sl.lock.RUnlock()
    return sl.addressToServer[addr]
}

func (sl *ServerLookup) Servers() []*metadata.Server {
    sl.lock.RLock()
    defer sl.lock.RUnlock()
    var ret []*metadata.Server
    for _, svr := range sl.addressToServer {
        ret = append(ret, svr)
    }
    return ret
}
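ServerLookup is registered as the transport's ServerAddressProvider in setupRaft above, so its ServerAddr method is what satisfies the interface added to the vendored raft package further down. A compile-time assertion like the following (not part of this change, just an illustrative sketch) makes that relationship explicit:

package consul

import "github.com/hashicorp/raft"

// Hypothetical compile-time check, not included in this PR: it fails to build
// if ServerLookup ever stops implementing raft.ServerAddressProvider.
var _ raft.ServerAddressProvider = (*ServerLookup)(nil)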
agent/consul/server_lookup_test.go (new file)
@@ -0,0 +1,58 @@

package consul

import (
    "fmt"
    "testing"

    "github.com/hashicorp/consul/agent/metadata"
    "github.com/hashicorp/raft"
)

type testAddr struct {
    addr string
}

func (ta *testAddr) Network() string {
    return "tcp"
}

func (ta *testAddr) String() string {
    return ta.addr
}

func TestServerLookup(t *testing.T) {
    lookup := NewServerLookup()
    addr := "72.0.0.17:8300"
    id := "1"

    svr := &metadata.Server{ID: id, Addr: &testAddr{addr}}
    lookup.AddServer(svr)

    got, err := lookup.ServerAddr(raft.ServerID(id))
    if err != nil {
        t.Fatalf("Unexpected error: %v", err)
    }
    if string(got) != addr {
        t.Fatalf("Expected %v but got %v", addr, got)
    }

    server := lookup.Server(raft.ServerAddress(addr))
    if server == nil {
        t.Fatalf("Expected lookup to return a server for %v", addr)
    }
    if server.Addr.String() != addr {
        t.Fatalf("Expected lookup to return address %v but got %v", addr, server.Addr)
    }

    lookup.RemoveServer(svr)

    _, err = lookup.ServerAddr("1")
    expectedErr := fmt.Errorf("Could not find address for server id 1")
    if expectedErr.Error() != err.Error() {
        t.Fatalf("Unexpected error, got %v wanted %v", err, expectedErr)
    }

    // Removing a server that was never added should be a no-op.
    svr2 := &metadata.Server{ID: "2", Addr: &testAddr{"123.4.5.6"}}
    lookup.RemoveServer(svr2)
}
@@ -342,7 +342,7 @@ func TestServer_JoinSeparateLanAndWanAddresses(t *testing.T) {
         if len(s2.router.GetDatacenters()) != 2 {
             r.Fatalf("remote consul missing")
         }
-        if len(s2.localConsuls) != 2 {
+        if len(s2.serverLookup.Servers()) != 2 {
             r.Fatalf("local consul fellow s3 for s2 missing")
         }
     })
@@ -666,14 +666,12 @@ func testVerifyRPC(s1, s2 *Server, t *testing.T) (bool, error) {
     retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s2, 2)) })

     // Have s2 make an RPC call to s1
-    s2.localLock.RLock()
     var leader *metadata.Server
-    for _, server := range s2.localConsuls {
+    for _, server := range s2.serverLookup.Servers() {
         if server.Name == s1.config.NodeName {
             leader = server
         }
     }
-    s2.localLock.RUnlock()
     if leader == nil {
         t.Fatal("no leader")
     }
vendor/github.com/hashicorp/raft-boltdb/Makefile (generated, vendored, new file)
@@ -0,0 +1,11 @@

DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...)

.PHONY: test deps

test:
    go test -timeout=30s ./...

deps:
    go get -d -v ./...
    echo $(DEPS) | xargs -n1 go get -d
vendor/github.com/hashicorp/raft/api.go (generated, vendored)
@@ -492,6 +492,7 @@ func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps Sna
         }
         r.processConfigurationLogEntry(&entry)
     }
+
     r.logger.Printf("[INFO] raft: Initial configuration (index=%d): %+v",
         r.configurations.latestIndex, r.configurations.latest.Servers)

vendor/github.com/hashicorp/raft/configuration.go (generated, vendored)
@@ -283,7 +283,7 @@ func encodePeers(configuration Configuration, trans Transport) []byte {
     var encPeers [][]byte
     for _, server := range configuration.Servers {
         if server.Suffrage == Voter {
-            encPeers = append(encPeers, trans.EncodePeer(server.Address))
+            encPeers = append(encPeers, trans.EncodePeer(server.ID, server.Address))
         }
     }

vendor/github.com/hashicorp/raft/inmem_transport.go (generated, vendored)
@@ -75,7 +75,7 @@ func (i *InmemTransport) LocalAddr() ServerAddress {

 // AppendEntriesPipeline returns an interface that can be used to pipeline
 // AppendEntries requests.
-func (i *InmemTransport) AppendEntriesPipeline(target ServerAddress) (AppendPipeline, error) {
+func (i *InmemTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) {
     i.RLock()
     peer, ok := i.peers[target]
     i.RUnlock()
@@ -90,7 +90,7 @@ func (i *InmemTransport) AppendEntriesPipeline(target ServerAddress) (AppendPipe
 }

 // AppendEntries implements the Transport interface.
-func (i *InmemTransport) AppendEntries(target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error {
+func (i *InmemTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error {
     rpcResp, err := i.makeRPC(target, args, nil, i.timeout)
     if err != nil {
         return err
@@ -103,7 +103,7 @@ func (i *InmemTransport) AppendEntries(target ServerAddress, args *AppendEntries
 }

 // RequestVote implements the Transport interface.
-func (i *InmemTransport) RequestVote(target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error {
+func (i *InmemTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error {
     rpcResp, err := i.makeRPC(target, args, nil, i.timeout)
     if err != nil {
         return err
@@ -116,7 +116,7 @@ func (i *InmemTransport) RequestVote(target ServerAddress, args *RequestVoteRequ
 }

 // InstallSnapshot implements the Transport interface.
-func (i *InmemTransport) InstallSnapshot(target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error {
+func (i *InmemTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error {
     rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout)
     if err != nil {
         return err
@@ -159,7 +159,7 @@ func (i *InmemTransport) makeRPC(target ServerAddress, args interface{}, r io.Re
 }

 // EncodePeer implements the Transport interface.
-func (i *InmemTransport) EncodePeer(p ServerAddress) []byte {
+func (i *InmemTransport) EncodePeer(id ServerID, p ServerAddress) []byte {
     return []byte(p)
 }

vendor/github.com/hashicorp/raft/net_transport.go (generated, vendored)
@@ -68,6 +68,8 @@ type NetworkTransport struct {

     maxPool int

+    serverAddressProvider ServerAddressProvider
+
     shutdown     bool
     shutdownCh   chan struct{}
     shutdownLock sync.Mutex
@@ -78,6 +80,28 @@ type NetworkTransport struct {
     TimeoutScale int
 }

+// NetworkTransportConfig encapsulates configuration for the network transport layer.
+type NetworkTransportConfig struct {
+    // ServerAddressProvider is used to override the target address when establishing a connection to invoke an RPC
+    ServerAddressProvider ServerAddressProvider
+
+    Logger *log.Logger
+
+    // Dialer
+    Stream StreamLayer
+
+    // MaxPool controls how many connections we will pool
+    MaxPool int
+
+    // Timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply
+    // the timeout by (SnapshotSize / TimeoutScale).
+    Timeout time.Duration
+}
+
+type ServerAddressProvider interface {
+    ServerAddr(id ServerID) (ServerAddress, error)
+}
+
 // StreamLayer is used with the NetworkTransport to provide
 // the low level stream abstraction.
 type StreamLayer interface {
@@ -112,6 +136,28 @@ type netPipeline struct {
     shutdownLock sync.Mutex
 }

+// NewNetworkTransportWithConfig creates a new network transport with the given config struct
+func NewNetworkTransportWithConfig(
+    config *NetworkTransportConfig,
+) *NetworkTransport {
+    if config.Logger == nil {
+        config.Logger = log.New(os.Stderr, "", log.LstdFlags)
+    }
+    trans := &NetworkTransport{
+        connPool:              make(map[ServerAddress][]*netConn),
+        consumeCh:             make(chan RPC),
+        logger:                config.Logger,
+        maxPool:               config.MaxPool,
+        shutdownCh:            make(chan struct{}),
+        stream:                config.Stream,
+        timeout:               config.Timeout,
+        TimeoutScale:          DefaultTimeoutScale,
+        serverAddressProvider: config.ServerAddressProvider,
+    }
+    go trans.listen()
+    return trans
+}
+
 // NewNetworkTransport creates a new network transport with the given dialer
 // and listener. The maxPool controls how many connections we will pool. The
 // timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply
@@ -125,10 +171,12 @@ func NewNetworkTransport(
     if logOutput == nil {
         logOutput = os.Stderr
     }
-    return NewNetworkTransportWithLogger(stream, maxPool, timeout, log.New(logOutput, "", log.LstdFlags))
+    logger := log.New(logOutput, "", log.LstdFlags)
+    config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger}
+    return NewNetworkTransportWithConfig(config)
 }

-// NewNetworkTransportWithLogger creates a new network transport with the given dialer
+// NewNetworkTransportWithLogger creates a new network transport with the given logger, dialer
 // and listener. The maxPool controls how many connections we will pool. The
 // timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply
 // the timeout by (SnapshotSize / TimeoutScale).
@@ -138,21 +186,8 @@ func NewNetworkTransportWithLogger(
     timeout time.Duration,
     logger *log.Logger,
 ) *NetworkTransport {
-    if logger == nil {
-        logger = log.New(os.Stderr, "", log.LstdFlags)
-    }
-    trans := &NetworkTransport{
-        connPool:     make(map[ServerAddress][]*netConn),
-        consumeCh:    make(chan RPC),
-        logger:       logger,
-        maxPool:      maxPool,
-        shutdownCh:   make(chan struct{}),
-        stream:       stream,
-        timeout:      timeout,
-        TimeoutScale: DefaultTimeoutScale,
-    }
-    go trans.listen()
-    return trans
+    config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger}
+    return NewNetworkTransportWithConfig(config)
 }

 // SetHeartbeatHandler is used to setup a heartbeat handler
@@ -214,6 +249,24 @@ func (n *NetworkTransport) getPooledConn(target ServerAddress) *netConn {
     return conn
 }

+// getConnFromAddressProvider returns a connection from the server address provider if available, or defaults to a connection using the target server address
+func (n *NetworkTransport) getConnFromAddressProvider(id ServerID, target ServerAddress) (*netConn, error) {
+    address := n.getProviderAddressOrFallback(id, target)
+    return n.getConn(address)
+}
+
+func (n *NetworkTransport) getProviderAddressOrFallback(id ServerID, target ServerAddress) ServerAddress {
+    if n.serverAddressProvider != nil {
+        serverAddressOverride, err := n.serverAddressProvider.ServerAddr(id)
+        if err != nil {
+            n.logger.Printf("[WARN] Unable to get address for server id %v, using fallback address %v: %v", id, target, err)
+        } else {
+            return serverAddressOverride
+        }
+    }
+    return target
+}
+
 // getConn is used to get a connection from the pool.
 func (n *NetworkTransport) getConn(target ServerAddress) (*netConn, error) {
     // Check for a pooled conn
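The contract of getProviderAddressOrFallback is that a provider error is non-fatal: the transport logs a warning and dials the address stored in the raft configuration instead. Below is a hedged sketch of a provider that exercises both paths; the lookupByID type and its data are assumptions for illustration, only the ServerAddressProvider interface comes from this change.

package main

import (
    "fmt"

    "github.com/hashicorp/raft"
)

// lookupByID is an illustrative map-backed provider.
type lookupByID map[raft.ServerID]raft.ServerAddress

func (m lookupByID) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) {
    addr, ok := m[id]
    if !ok {
        // Returning an error does not fail the RPC; the transport above logs a
        // warning and falls back to the target address it was given.
        return "", fmt.Errorf("no address for server id %v", id)
    }
    return addr, nil
}

func main() {
    p := lookupByID{"srv-1": "10.1.2.3:8300"} // assumed example data

    if addr, err := p.ServerAddr("srv-1"); err == nil {
        fmt.Println("override address:", addr) // provider hit: transport dials this
    }
    if _, err := p.ServerAddr("srv-2"); err != nil {
        fmt.Println("fallback:", err) // provider miss: transport keeps the raft address
    }
}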
@@ -260,9 +313,9 @@ func (n *NetworkTransport) returnConn(conn *netConn) {

 // AppendEntriesPipeline returns an interface that can be used to pipeline
 // AppendEntries requests.
-func (n *NetworkTransport) AppendEntriesPipeline(target ServerAddress) (AppendPipeline, error) {
+func (n *NetworkTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) {
     // Get a connection
-    conn, err := n.getConn(target)
+    conn, err := n.getConnFromAddressProvider(id, target)
     if err != nil {
         return nil, err
     }
@@ -272,19 +325,19 @@ func (n *NetworkTransport) AppendEntriesPipeline(target ServerAddress) (AppendPi
 }

 // AppendEntries implements the Transport interface.
-func (n *NetworkTransport) AppendEntries(target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error {
-    return n.genericRPC(target, rpcAppendEntries, args, resp)
+func (n *NetworkTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error {
+    return n.genericRPC(id, target, rpcAppendEntries, args, resp)
 }

 // RequestVote implements the Transport interface.
-func (n *NetworkTransport) RequestVote(target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error {
-    return n.genericRPC(target, rpcRequestVote, args, resp)
+func (n *NetworkTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error {
+    return n.genericRPC(id, target, rpcRequestVote, args, resp)
 }

 // genericRPC handles a simple request/response RPC.
-func (n *NetworkTransport) genericRPC(target ServerAddress, rpcType uint8, args interface{}, resp interface{}) error {
+func (n *NetworkTransport) genericRPC(id ServerID, target ServerAddress, rpcType uint8, args interface{}, resp interface{}) error {
     // Get a conn
-    conn, err := n.getConn(target)
+    conn, err := n.getConnFromAddressProvider(id, target)
     if err != nil {
         return err
     }
@@ -308,9 +361,9 @@ func (n *NetworkTransport) genericRPC(target ServerAddress, rpcType uint8, args
 }

 // InstallSnapshot implements the Transport interface.
-func (n *NetworkTransport) InstallSnapshot(target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error {
+func (n *NetworkTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error {
     // Get a conn, always close for InstallSnapshot
-    conn, err := n.getConn(target)
+    conn, err := n.getConnFromAddressProvider(id, target)
     if err != nil {
         return err
     }
@@ -346,8 +399,9 @@ func (n *NetworkTransport) InstallSnapshot(target ServerAddress, args *InstallSn
 }

 // EncodePeer implements the Transport interface.
-func (n *NetworkTransport) EncodePeer(p ServerAddress) []byte {
-    return []byte(p)
+func (n *NetworkTransport) EncodePeer(id ServerID, p ServerAddress) []byte {
+    address := n.getProviderAddressOrFallback(id, p)
+    return []byte(address)
 }

 // DecodePeer implements the Transport interface.
|
4
vendor/github.com/hashicorp/raft/raft.go
generated
vendored
4
vendor/github.com/hashicorp/raft/raft.go
generated
vendored
@ -1379,7 +1379,7 @@ func (r *Raft) electSelf() <-chan *voteResult {
|
|||||||
req := &RequestVoteRequest{
|
req := &RequestVoteRequest{
|
||||||
RPCHeader: r.getRPCHeader(),
|
RPCHeader: r.getRPCHeader(),
|
||||||
Term: r.getCurrentTerm(),
|
Term: r.getCurrentTerm(),
|
||||||
Candidate: r.trans.EncodePeer(r.localAddr),
|
Candidate: r.trans.EncodePeer(r.localID, r.localAddr),
|
||||||
LastLogIndex: lastIdx,
|
LastLogIndex: lastIdx,
|
||||||
LastLogTerm: lastTerm,
|
LastLogTerm: lastTerm,
|
||||||
}
|
}
|
||||||
@ -1389,7 +1389,7 @@ func (r *Raft) electSelf() <-chan *voteResult {
|
|||||||
r.goFunc(func() {
|
r.goFunc(func() {
|
||||||
defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now())
|
defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now())
|
||||||
resp := &voteResult{voterID: peer.ID}
|
resp := &voteResult{voterID: peer.ID}
|
||||||
err := r.trans.RequestVote(peer.Address, req, &resp.RequestVoteResponse)
|
err := r.trans.RequestVote(peer.ID, peer.Address, req, &resp.RequestVoteResponse)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.logger.Printf("[ERR] raft: Failed to make RequestVote RPC to %v: %v", peer, err)
|
r.logger.Printf("[ERR] raft: Failed to make RequestVote RPC to %v: %v", peer, err)
|
||||||
resp.Term = req.Term
|
resp.Term = req.Term
|
||||||
|
vendor/github.com/hashicorp/raft/replication.go (generated, vendored)
@@ -157,7 +157,7 @@ PIPELINE:
     goto RPC
 }

-// replicateTo is a hepler to replicate(), used to replicate the logs up to a
+// replicateTo is a helper to replicate(), used to replicate the logs up to a
 // given last index.
 // If the follower log is behind, we take care to bring them up to date.
 func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) {
@@ -183,7 +183,7 @@ START:

     // Make the RPC call
     start = time.Now()
-    if err := r.trans.AppendEntries(s.peer.Address, &req, &resp); err != nil {
+    if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil {
         r.logger.Printf("[ERR] raft: Failed to AppendEntries to %v: %v", s.peer, err)
         s.failures++
         return
@@ -278,7 +278,7 @@ func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) {
         RPCHeader:       r.getRPCHeader(),
         SnapshotVersion: meta.Version,
         Term:            s.currentTerm,
-        Leader:          r.trans.EncodePeer(r.localAddr),
+        Leader:          r.trans.EncodePeer(r.localID, r.localAddr),
         LastLogIndex:    meta.Index,
         LastLogTerm:     meta.Term,
         Peers:           meta.Peers,
@@ -290,7 +290,7 @@ func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) {
     // Make the call
     start := time.Now()
     var resp InstallSnapshotResponse
-    if err := r.trans.InstallSnapshot(s.peer.Address, &req, &resp, snapshot); err != nil {
+    if err := r.trans.InstallSnapshot(s.peer.ID, s.peer.Address, &req, &resp, snapshot); err != nil {
         r.logger.Printf("[ERR] raft: Failed to install snapshot %v: %v", snapID, err)
         s.failures++
         return false, err
@@ -332,7 +332,7 @@ func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) {
     req := AppendEntriesRequest{
         RPCHeader: r.getRPCHeader(),
         Term:      s.currentTerm,
-        Leader:    r.trans.EncodePeer(r.localAddr),
+        Leader:    r.trans.EncodePeer(r.localID, r.localAddr),
     }
     var resp AppendEntriesResponse
     for {
@@ -345,7 +345,7 @@ func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) {
     }

     start := time.Now()
-    if err := r.trans.AppendEntries(s.peer.Address, &req, &resp); err != nil {
+    if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil {
         r.logger.Printf("[ERR] raft: Failed to heartbeat to %v: %v", s.peer.Address, err)
         failures++
         select {
@@ -367,7 +367,7 @@ func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) {
 // back to the standard replication which can handle more complex situations.
 func (r *Raft) pipelineReplicate(s *followerReplication) error {
     // Create a new pipeline
-    pipeline, err := r.trans.AppendEntriesPipeline(s.peer.Address)
+    pipeline, err := r.trans.AppendEntriesPipeline(s.peer.ID, s.peer.Address)
     if err != nil {
         return err
     }
@@ -476,7 +476,7 @@ func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh,
 func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error {
     req.RPCHeader = r.getRPCHeader()
     req.Term = s.currentTerm
-    req.Leader = r.trans.EncodePeer(r.localAddr)
+    req.Leader = r.trans.EncodePeer(r.localID, r.localAddr)
     req.LeaderCommitIndex = r.getCommitIndex()
     if err := r.setPreviousLog(req, nextIndex); err != nil {
         return err
vendor/github.com/hashicorp/raft/tcp_transport.go (generated, vendored)
@@ -28,7 +28,7 @@ func NewTCPTransport(
     timeout time.Duration,
     logOutput io.Writer,
 ) (*NetworkTransport, error) {
-    return newTCPTransport(bindAddr, advertise, maxPool, timeout, func(stream StreamLayer) *NetworkTransport {
+    return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport {
         return NewNetworkTransport(stream, maxPool, timeout, logOutput)
     })
 }
@@ -42,15 +42,26 @@ func NewTCPTransportWithLogger(
     timeout time.Duration,
     logger *log.Logger,
 ) (*NetworkTransport, error) {
-    return newTCPTransport(bindAddr, advertise, maxPool, timeout, func(stream StreamLayer) *NetworkTransport {
+    return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport {
         return NewNetworkTransportWithLogger(stream, maxPool, timeout, logger)
     })
 }

+// NewTCPTransportWithConfig returns a NetworkTransport that is built on top of
+// a TCP streaming transport layer, using the given config and address provider.
+func NewTCPTransportWithConfig(
+    bindAddr string,
+    advertise net.Addr,
+    config *NetworkTransportConfig,
+) (*NetworkTransport, error) {
+    return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport {
+        config.Stream = stream
+        return NewNetworkTransportWithConfig(config)
+    })
+}
+
 func newTCPTransport(bindAddr string,
     advertise net.Addr,
-    maxPool int,
-    timeout time.Duration,
     transportCreator func(stream StreamLayer) *NetworkTransport) (*NetworkTransport, error) {
     // Try to bind
     list, err := net.Listen("tcp", bindAddr)
vendor/github.com/hashicorp/raft/transport.go (generated, vendored)
@@ -35,20 +35,20 @@ type Transport interface {

     // AppendEntriesPipeline returns an interface that can be used to pipeline
     // AppendEntries requests.
-    AppendEntriesPipeline(target ServerAddress) (AppendPipeline, error)
+    AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error)

     // AppendEntries sends the appropriate RPC to the target node.
-    AppendEntries(target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error
+    AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error

     // RequestVote sends the appropriate RPC to the target node.
-    RequestVote(target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error
+    RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error

     // InstallSnapshot is used to push a snapshot down to a follower. The data is read from
     // the ReadCloser and streamed to the client.
-    InstallSnapshot(target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error
+    InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error

     // EncodePeer is used to serialize a peer's address.
-    EncodePeer(ServerAddress) []byte
+    EncodePeer(id ServerID, addr ServerAddress) []byte

     // DecodePeer is used to deserialize a peer's address.
     DecodePeer([]byte) ServerAddress
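Every Transport method that addresses a peer now receives the peer's ServerID as well as its ServerAddress, so any custom implementation or wrapper has to be updated. The following hypothetical decorator (not part of this change) sketches the new shape; embedding forwards the untouched methods.

package transportutil // hypothetical helper package, not part of this PR

import (
    "log"

    "github.com/hashicorp/raft"
)

// loggingTransport wraps an existing raft.Transport and logs AppendEntries calls.
type loggingTransport struct {
    raft.Transport
}

// AppendEntries matches the updated Transport signature, which now carries the
// target's ServerID alongside its ServerAddress.
func (t *loggingTransport) AppendEntries(id raft.ServerID, target raft.ServerAddress,
    args *raft.AppendEntriesRequest, resp *raft.AppendEntriesResponse) error {
    log.Printf("AppendEntries -> id=%s addr=%s entries=%d", id, target, len(args.Entries))
    return t.Transport.AppendEntries(id, target, args, resp)
}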
vendor/vendor.json (vendored)
@@ -57,7 +57,7 @@
 {"checksumSHA1":"vt+P9D2yWDO3gdvdgCzwqunlhxU=","path":"github.com/hashicorp/logutils","revision":"0dc08b1671f34c4250ce212759ebd880f743d883","revisionTime":"2015-06-09T07:04:31Z"},
 {"checksumSHA1":"ml0MTqOsKTrsqv/mZhy78Vz4SfA=","path":"github.com/hashicorp/memberlist","revision":"d6c1fb0b99c33d0a8e22acea9da9709b369b5d39","revisionTime":"2017-08-15T22:46:17Z"},
 {"checksumSHA1":"qnlqWJYV81ENr61SZk9c65R1mDo=","path":"github.com/hashicorp/net-rpc-msgpackrpc","revision":"a14192a58a694c123d8fe5481d4a4727d6ae82f3","revisionTime":"2015-11-16T02:03:38Z"},
-{"checksumSHA1":"RVDP6/BNLtrGbyoiGU2GjTun9Kk=","path":"github.com/hashicorp/raft","revision":"2356637a1c1ffe894b753680363ad970480215aa","revisionTime":"2017-08-24T21:39:20Z","version":"library-v2-stage-one","versionExact":"library-v2-stage-one"},
+{"checksumSHA1":"f2QYddVWZ2eWxdlCEhearTH4XOs=","path":"github.com/hashicorp/raft","revision":"c837e57a6077e74a4a3749959fb6cfefc26d7705","revisionTime":"2017-08-30T14:31:53Z","version":"library-v2-stage-one","versionExact":"library-v2-stage-one"},
 {"checksumSHA1":"QAxukkv54/iIvLfsUP6IK4R0m/A=","path":"github.com/hashicorp/raft-boltdb","revision":"d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee","revisionTime":"2015-02-01T20:08:39Z"},
 {"checksumSHA1":"/oss17GO4hXGM7QnUdI3VzcAHzA=","comment":"v0.7.0-66-g6c4672d","path":"github.com/hashicorp/serf/coordinate","revision":"c2e4be24cdc9031eb0ad869c5d160775efdf7d7a","revisionTime":"2017-05-25T23:15:04Z"},
 {"checksumSHA1":"3WPnGSL9ZK6EmkAE6tEW5SCxrd8=","comment":"v0.7.0-66-g6c4672d","path":"github.com/hashicorp/serf/serf","revision":"b84a66cc5575994cb672940d244a2404141688c0","revisionTime":"2017-08-17T21:22:02Z"},