Move RPC router from Client/Server and into BaseDeps (#8559)
This will allow the RPC router to be a shared component, which is needed for AutoConfig.
Parent: 3d1e043966
Commit: fafc6cf7ff
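For context, the wiring pattern this change introduces can be sketched in isolation. This is a minimal Go sketch, assuming simplified stand-in types (the Router, Option, and NewClient below are illustrative, not the actual Consul API): a deps struct builds one router, hands it down, and the constructor accepts it through a functional option, falling back to building a private router only when none is injected.

package main

import "fmt"

// Router stands in for agent/router.Router: built once, consumed everywhere.
type Router struct{ name string }

func NewRouter(name string) *Router { return &Router{name: name} }

// options mirrors the consulOptions pattern from the diff: each dependency
// arrives through a functional option.
type options struct {
    router *Router
}

type Option func(*options)

func WithRouter(r *Router) Option {
    return func(o *options) { o.router = r }
}

// NewClient keeps the diff's fallback: when no router was injected it builds
// a private one, so callers that predate BaseDeps keep working.
func NewClient(opts ...Option) *Router {
    var flat options
    for _, opt := range opts {
        opt(&flat)
    }
    rpcRouter := flat.router
    if rpcRouter == nil {
        rpcRouter = NewRouter("client.dc1") // hypothetical node.datacenter label
    }
    return rpcRouter
}

func main() {
    shared := NewRouter("node1.dc1") // in Consul this happens once, in NewBaseDeps
    fmt.Println(NewClient(WithRouter(shared)) == shared) // true: one shared router
}

The same object can therefore serve AutoConfig and the Client/Server RPC paths, which is what the diff below implements with BaseDeps.Router, consul.WithRouter, and the nil-check fallbacks in NewClient and NewServer.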
@@ -18,6 +18,7 @@ import (
     "time"
     "github.com/hashicorp/consul/agent/dns"
+    "github.com/hashicorp/consul/agent/router"
     "github.com/hashicorp/go-connlimit"
     "github.com/hashicorp/go-hclog"
     "github.com/hashicorp/go-memdb"
@@ -307,6 +308,9 @@ type Agent struct {
     // Connection Pool
     connPool *pool.ConnPool
 
+    // Shared RPC Router
+    router *router.Router
+
     // enterpriseAgent embeds fields that we only access in consul-enterprise builds
     enterpriseAgent
 }
@@ -352,6 +356,7 @@ func New(bd BaseDeps) (*Agent, error) {
         MemSink:  bd.MetricsHandler,
         connPool: bd.ConnPool,
         autoConf: bd.AutoConfig,
+        router:   bd.Router,
     }
 
     a.serviceManager = NewServiceManager(&a)
@@ -463,6 +468,7 @@ func (a *Agent) Start(ctx context.Context) error {
         consul.WithTokenStore(a.tokens),
         consul.WithTLSConfigurator(a.tlsConfigurator),
         consul.WithConnectionPool(a.connPool),
+        consul.WithRouter(a.router),
     }
 
     // Setup either the client or the server.
@@ -4752,3 +4752,33 @@ func TestAgent_AutoEncrypt(t *testing.T) {
     require.Len(t, x509Cert.URIs, 1)
     require.Equal(t, id.URI(), x509Cert.URIs[0])
 }
+
+func TestSharedRPCRouter(t *testing.T) {
+    // this test runs both a server and client and ensures that the shared
+    // router is being used. It would be possible for the Client and Server
+    // types to create and use their own routers and for RPCs such as the
+    // ones used in WaitForTestAgent to succeed. However accessing the
+    // router stored on the agent ensures that Serf information from the
+    // Client/Server types are being set in the same shared rpc router.
+
+    srv := NewTestAgent(t, "")
+    defer srv.Shutdown()
+
+    testrpc.WaitForTestAgent(t, srv.RPC, "dc1")
+
+    mgr, server := srv.Agent.router.FindLANRoute()
+    require.NotNil(t, mgr)
+    require.NotNil(t, server)
+
+    client := NewTestAgent(t, `
+        server = false
+        bootstrap = false
+        retry_join = ["`+srv.Config.SerfBindAddrLAN.String()+`"]
+    `)
+
+    testrpc.WaitForTestAgent(t, client.RPC, "dc1")
+
+    mgr, server = client.Agent.router.FindLANRoute()
+    require.NotNil(t, mgr)
+    require.NotNil(t, server)
+}
@@ -73,7 +73,7 @@ func (c *Client) RequestAutoEncryptCerts(ctx context.Context, servers []string,
     // Check if we know about a server already through gossip. Depending on
     // how the agent joined, there might already be one. Also in case this
     // gets called because the cert expired.
-    server := c.routers.FindServer()
+    server := c.router.FindLANServer()
     if server != nil {
         servers = []string{server.Addr.String()}
     }
@@ -15,6 +15,7 @@ import (
     "github.com/hashicorp/consul/lib"
     "github.com/hashicorp/consul/logging"
     "github.com/hashicorp/consul/tlsutil"
+    "github.com/hashicorp/consul/types"
     "github.com/hashicorp/go-hclog"
     "github.com/hashicorp/serf/serf"
     "golang.org/x/time/rate"
@@ -59,9 +60,9 @@ type Client struct {
     // Connection pool to consul servers
     connPool *pool.ConnPool
 
-    // routers is responsible for the selection and maintenance of
+    // router is responsible for the selection and maintenance of
     // Consul servers this agent uses for RPC requests
-    routers *router.Manager
+    router *router.Router
 
     // rpcLimiter is used to rate limit the total number of RPCs initiated
     // from an agent.
@@ -120,12 +121,14 @@ func NewClient(config *Config, options ...ConsulOption) (*Client, error) {
         }
     }
 
+    logger := flat.logger.NamedIntercept(logging.ConsulClient)
+
     // Create client
     c := &Client{
         config:          config,
         connPool:        connPool,
         eventCh:         make(chan serf.Event, serfEventBacklog),
-        logger:          flat.logger.NamedIntercept(logging.ConsulClient),
+        logger:          logger,
         shutdownCh:      make(chan struct{}),
         tlsConfigurator: tlsConfigurator,
     }
@@ -160,15 +163,22 @@ func NewClient(config *Config, options ...ConsulOption) (*Client, error) {
         return nil, fmt.Errorf("Failed to start lan serf: %v", err)
     }
 
-    // Start maintenance task for servers
-    c.routers = router.New(c.logger, c.shutdownCh, c.serf, c.connPool, "")
-    go c.routers.Start()
+    rpcRouter := flat.router
+    if rpcRouter == nil {
+        rpcRouter = router.NewRouter(logger, config.Datacenter, fmt.Sprintf("%s.%s", config.NodeName, config.Datacenter))
+    }
+
+    if err := rpcRouter.AddArea(types.AreaLAN, c.serf, c.connPool); err != nil {
+        c.Shutdown()
+        return nil, fmt.Errorf("Failed to add LAN area to the RPC router: %w", err)
+    }
+    c.router = rpcRouter
 
     // Start LAN event handlers after the router is complete since the event
     // handlers depend on the router and the router depends on Serf.
     go c.lanEventHandler()
 
-    // This needs to happen after initializing c.routers to prevent a race
+    // This needs to happen after initializing c.router to prevent a race
     // condition where the router manager is used when the pointer is nil
     if c.acls.ACLsEnabled() {
         go c.monitorACLMode()
@@ -276,7 +286,7 @@ func (c *Client) RPC(method string, args interface{}, reply interface{}) error {
     firstCheck := time.Now()
 
 TRY:
-    server := c.routers.FindServer()
+    manager, server := c.router.FindLANRoute()
     if server == nil {
         return structs.ErrNoServers
     }
@@ -301,7 +311,7 @@ TRY:
         "error", rpcErr,
     )
     metrics.IncrCounterWithLabels([]string{"client", "rpc", "failed"}, 1, []metrics.Label{{Name: "server", Value: server.Name}})
-    c.routers.NotifyFailedServer(server)
+    manager.NotifyFailedServer(server)
     if retry := canRetry(args, rpcErr); !retry {
         return rpcErr
     }
@@ -323,7 +333,7 @@ TRY:
 // operation.
 func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer,
     replyFn structs.SnapshotReplyFn) error {
-    server := c.routers.FindServer()
+    manager, server := c.router.FindLANRoute()
     if server == nil {
         return structs.ErrNoServers
     }
@@ -339,6 +349,7 @@ func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io
     var reply structs.SnapshotResponse
     snap, err := SnapshotRPC(c.connPool, c.config.Datacenter, server.ShortName, server.Addr, args, in, &reply)
     if err != nil {
+        manager.NotifyFailedServer(server)
         return err
     }
     defer func() {
@@ -367,7 +378,7 @@ func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io
 // Stats is used to return statistics for debugging and insight
 // for various sub-systems
 func (c *Client) Stats() map[string]map[string]string {
-    numServers := c.routers.NumServers()
+    numServers := c.router.GetLANManager().NumServers()
 
     toString := func(v uint64) string {
         return strconv.FormatUint(v, 10)
@@ -9,6 +9,7 @@ import (
     "github.com/hashicorp/consul/agent/structs"
     "github.com/hashicorp/consul/lib"
     "github.com/hashicorp/consul/logging"
+    "github.com/hashicorp/consul/types"
     "github.com/hashicorp/go-hclog"
     "github.com/hashicorp/serf/serf"
 )
@@ -115,7 +116,7 @@ func (c *Client) nodeJoin(me serf.MemberEvent) {
             continue
         }
         c.logger.Info("adding server", "server", parts)
-        c.routers.AddServer(parts)
+        c.router.AddServer(types.AreaLAN, parts)
 
         // Trigger the callback
         if c.config.ServerUp != nil {
@@ -139,7 +140,7 @@ func (c *Client) nodeUpdate(me serf.MemberEvent) {
             continue
         }
         c.logger.Info("updating server", "server", parts.String())
-        c.routers.AddServer(parts)
+        c.router.AddServer(types.AreaLAN, parts)
     }
 }
 
@@ -151,7 +152,7 @@ func (c *Client) nodeFail(me serf.MemberEvent) {
             continue
         }
         c.logger.Info("removing server", "server", parts.String())
-        c.routers.RemoveServer(parts)
+        c.router.RemoveServer(types.AreaLAN, parts)
     }
 }
 
@@ -115,7 +115,7 @@ func TestClient_JoinLAN(t *testing.T) {
     joinLAN(t, c1, s1)
     testrpc.WaitForTestAgent(t, c1.RPC, "dc1")
     retry.Run(t, func(r *retry.R) {
-        if got, want := c1.routers.NumServers(), 1; got != want {
+        if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
             r.Fatalf("got %d servers want %d", got, want)
         }
         if got, want := len(s1.LANMembers()), 2; got != want {
@@ -153,7 +153,7 @@ func TestClient_LANReap(t *testing.T) {
 
     // Check the router has both
     retry.Run(t, func(r *retry.R) {
-        server := c1.routers.FindServer()
+        server := c1.router.FindLANServer()
         require.NotNil(t, server)
         require.Equal(t, s1.config.NodeName, server.Name)
     })
@@ -163,7 +163,7 @@ func TestClient_LANReap(t *testing.T) {
 
     retry.Run(t, func(r *retry.R) {
         require.Len(r, c1.LANMembers(), 1)
-        server := c1.routers.FindServer()
+        server := c1.router.FindLANServer()
         require.Nil(t, server)
     })
 }
@@ -393,7 +393,7 @@ func TestClient_RPC_ConsulServerPing(t *testing.T) {
     }
 
     // Sleep to allow Serf to sync, shuffle, and let the shuffle complete
-    c.routers.ResetRebalanceTimer()
+    c.router.GetLANManager().ResetRebalanceTimer()
     time.Sleep(time.Second)
 
     if len(c.LANMembers()) != numServers+numClients {
@@ -409,7 +409,7 @@ func TestClient_RPC_ConsulServerPing(t *testing.T) {
     var pingCount int
     for range servers {
         time.Sleep(200 * time.Millisecond)
-        s := c.routers.FindServer()
+        m, s := c.router.FindLANRoute()
         ok, err := c.connPool.Ping(s.Datacenter, s.ShortName, s.Addr)
         if !ok {
             t.Errorf("Unable to ping server %v: %s", s.String(), err)
@@ -418,7 +418,7 @@ func TestClient_RPC_ConsulServerPing(t *testing.T) {
 
         // Artificially fail the server in order to rotate the server
         // list
-        c.routers.NotifyFailedServer(s)
+        m.NotifyFailedServer(s)
     }
 
     if pingCount != numServers {
@@ -527,7 +527,7 @@ func TestClient_SnapshotRPC(t *testing.T) {
 
     // Wait until we've got a healthy server.
     retry.Run(t, func(r *retry.R) {
-        if got, want := c1.routers.NumServers(), 1; got != want {
+        if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
             r.Fatalf("got %d servers want %d", got, want)
         }
     })
@@ -562,7 +562,7 @@ func TestClient_SnapshotRPC_RateLimit(t *testing.T) {
 
     joinLAN(t, c1, s1)
     retry.Run(t, func(r *retry.R) {
-        if got, want := c1.routers.NumServers(), 1; got != want {
+        if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
             r.Fatalf("got %d servers want %d", got, want)
         }
     })
@@ -610,7 +610,7 @@ func TestClient_SnapshotRPC_TLS(t *testing.T) {
         }
 
         // Wait until we've got a healthy server.
-        if got, want := c1.routers.NumServers(), 1; got != want {
+        if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
             r.Fatalf("got %d servers want %d", got, want)
         }
     })
@@ -10,6 +10,7 @@ import (
     "github.com/hashicorp/consul/agent/consul/state"
     "github.com/hashicorp/consul/agent/structs"
     "github.com/hashicorp/consul/logging"
+    "github.com/hashicorp/consul/types"
     "github.com/hashicorp/go-hclog"
     "github.com/hashicorp/go-memdb"
 )
@@ -161,23 +162,32 @@ func (c *Coordinate) Update(args *structs.CoordinateUpdateRequest, reply *struct
 
 // ListDatacenters returns the list of datacenters and their respective nodes
 // and the raw coordinates of those nodes (if no coordinates are available for
-// any of the nodes, the node list may be empty).
+// any of the nodes, the node list may be empty). This endpoint will not return
+// information about the LAN network area.
 func (c *Coordinate) ListDatacenters(args *struct{}, reply *[]structs.DatacenterMap) error {
     maps, err := c.srv.router.GetDatacenterMaps()
     if err != nil {
         return err
     }
 
+    var out []structs.DatacenterMap
+
     // Strip the datacenter suffixes from all the node names.
-    for i := range maps {
-        suffix := fmt.Sprintf(".%s", maps[i].Datacenter)
-        for j := range maps[i].Coordinates {
-            node := maps[i].Coordinates[j].Node
-            maps[i].Coordinates[j].Node = strings.TrimSuffix(node, suffix)
+    for _, dcMap := range maps {
+        if dcMap.AreaID == types.AreaLAN {
+            continue
         }
+
+        suffix := fmt.Sprintf(".%s", dcMap.Datacenter)
+        for j := range dcMap.Coordinates {
+            node := dcMap.Coordinates[j].Node
+            dcMap.Coordinates[j].Node = strings.TrimSuffix(node, suffix)
+        }
+
+        out = append(out, dcMap)
     }
 
-    *reply = maps
+    *reply = out
     return nil
 }
 
@@ -2,6 +2,7 @@ package consul
 
 import (
     "github.com/hashicorp/consul/agent/pool"
+    "github.com/hashicorp/consul/agent/router"
     "github.com/hashicorp/consul/agent/token"
     "github.com/hashicorp/consul/tlsutil"
     "github.com/hashicorp/go-hclog"
@@ -12,6 +13,7 @@ type consulOptions struct {
     tlsConfigurator *tlsutil.Configurator
     connPool        *pool.ConnPool
     tokens          *token.Store
+    router          *router.Router
 }
 
 type ConsulOption func(*consulOptions)
@@ -40,6 +42,12 @@ func WithTokenStore(tokens *token.Store) ConsulOption {
     }
 }
 
+func WithRouter(router *router.Router) ConsulOption {
+    return func(opt *consulOptions) {
+        opt.router = router
+    }
+}
+
 func flattenConsulOptions(options []ConsulOption) consulOptions {
     var flat consulOptions
     for _, opt := range options {
@@ -322,6 +322,7 @@ func NewServer(config *Config, options ...ConsulOption) (*Server, error) {
     tokens := flat.tokens
     tlsConfigurator := flat.tlsConfigurator
     connPool := flat.connPool
+    rpcRouter := flat.router
 
     if err := config.CheckProtocolVersion(); err != nil {
         return nil, err
@@ -377,6 +378,11 @@ func NewServer(config *Config, options ...ConsulOption) (*Server, error) {
 
     serverLogger := logger.NamedIntercept(logging.ConsulServer)
     loggers := newLoggerStore(serverLogger)
+
+    if rpcRouter == nil {
+        rpcRouter = router.NewRouter(serverLogger, config.Datacenter, fmt.Sprintf("%s.%s", config.NodeName, config.Datacenter))
+    }
+
     // Create server.
     s := &Server{
         config:            config,
@@ -388,7 +394,7 @@ func NewServer(config *Config, options ...ConsulOption) (*Server, error) {
         loggers:           loggers,
         leaveCh:           make(chan struct{}),
         reconcileCh:       make(chan serf.Member, reconcileChSize),
-        router:            router.NewRouter(serverLogger, config.Datacenter, fmt.Sprintf("%s.%s", config.NodeName, config.Datacenter)),
+        router:            rpcRouter,
         rpcServer:         rpc.NewServer(),
         insecureRPCServer: rpc.NewServer(),
         tlsConfigurator:   tlsConfigurator,
@@ -545,6 +551,11 @@ func NewServer(config *Config, options ...ConsulOption) (*Server, error) {
         s.Shutdown()
         return nil, fmt.Errorf("Failed to start LAN Serf: %v", err)
     }
+
+    if err := s.router.AddArea(types.AreaLAN, s.serfLAN, s.connPool); err != nil {
+        s.Shutdown()
+        return nil, fmt.Errorf("Failed to add LAN serf route: %w", err)
+    }
     go s.lanEventHandler()
 
     // Start the flooders after the LAN event handler is wired up.
@@ -366,7 +366,7 @@ func (c *Client) CheckServers(datacenter string, fn func(*metadata.Server) bool)
         return
     }
 
-    c.routers.CheckServers(fn)
+    c.router.CheckServers(datacenter, fn)
 }
 
 type serversACLMode struct {
@@ -254,6 +254,9 @@ func (m *Manager) CheckServers(fn func(srv *metadata.Server) bool) {
 // getServerList is a convenience method which hides the locking semantics
 // of atomic.Value from the caller.
 func (m *Manager) getServerList() serverList {
+    if m == nil {
+        return serverList{}
+    }
     return m.listValue.Load().(serverList)
 }
 
@@ -144,6 +144,12 @@ func (r *Router) AddArea(areaID types.AreaID, cluster RouterSerfCluster, pinger
     }
     r.areas[areaID] = area
 
+    // always ensure we have a started manager for the LAN area
+    if areaID == types.AreaLAN {
+        r.logger.Info("Initializing LAN area manager")
+        r.maybeInitializeManager(area, r.localDatacenter)
+    }
+
     // Do an initial populate of the manager so that we don't have to wait
     // for events to fire. This lets us attempt to use all the known servers
     // initially, and then will quickly detect that they are failed if we
@@ -151,10 +157,12 @@ func (r *Router) AddArea(areaID types.AreaID, cluster RouterSerfCluster, pinger
     for _, m := range cluster.Members() {
         ok, parts := metadata.IsConsulServer(m)
         if !ok {
-            r.logger.Warn("Non-server in server-only area",
-                "non_server", m.Name,
-                "area", areaID,
-            )
+            if areaID != types.AreaLAN {
+                r.logger.Warn("Non-server in server-only area",
+                    "non_server", m.Name,
+                    "area", areaID,
+                )
+            }
             continue
         }
 
@@ -233,24 +241,35 @@ func (r *Router) RemoveArea(areaID types.AreaID) error {
     return nil
 }
 
+// maybeInitializeManager will initialize a new manager for the given area/dc
+// if its not already created. Calling this function should only be done if
+// holding a write lock on the Router.
+func (r *Router) maybeInitializeManager(area *areaInfo, dc string) *Manager {
+    info, ok := area.managers[dc]
+    if ok {
+        return info.manager
+    }
+
+    shutdownCh := make(chan struct{})
+    manager := New(r.logger, shutdownCh, area.cluster, area.pinger, r.serverName)
+    info = &managerInfo{
+        manager:    manager,
+        shutdownCh: shutdownCh,
+    }
+    area.managers[dc] = info
+
+    managers := r.managers[dc]
+    r.managers[dc] = append(managers, manager)
+    go manager.Start()
+
+    return manager
+}
+
 // addServer does the work of AddServer once the write lock is held.
 func (r *Router) addServer(area *areaInfo, s *metadata.Server) error {
     // Make the manager on the fly if this is the first we've seen of it,
     // and add it to the index.
-    info, ok := area.managers[s.Datacenter]
-    if !ok {
-        shutdownCh := make(chan struct{})
-        manager := New(r.logger, shutdownCh, area.cluster, area.pinger, r.serverName)
-        info = &managerInfo{
-            manager:    manager,
-            shutdownCh: shutdownCh,
-        }
-        area.managers[s.Datacenter] = info
-
-        managers := r.managers[s.Datacenter]
-        r.managers[s.Datacenter] = append(managers, manager)
-        go manager.Start()
-    }
+    manager := r.maybeInitializeManager(area, s.Datacenter)
 
     // If TLS is enabled for the area, set it on the server so the manager
     // knows to use TLS when pinging it.
@@ -258,7 +277,7 @@ func (r *Router) addServer(area *areaInfo, s *metadata.Server) error {
         s.UseTLS = true
     }
 
-    info.manager.AddServer(s)
+    manager.AddServer(s)
     return nil
 }
 
@@ -341,6 +360,28 @@ func (r *Router) FindRoute(datacenter string) (*Manager, *metadata.Server, bool)
     return r.routeFn(datacenter)
 }
 
+// FindLANRoute returns a healthy server within the local datacenter. In some
+// cases this may return a best-effort unhealthy server that can be used for a
+// connection attempt. If any problem occurs with the given server, the caller
+// should feed that back to the manager associated with the server, which is
+// also returned, by calling NotifyFailedServer().
+func (r *Router) FindLANRoute() (*Manager, *metadata.Server) {
+    mgr := r.GetLANManager()
+
+    if mgr == nil {
+        return nil, nil
+    }
+
+    return mgr, mgr.FindServer()
+}
+
+// FindLANServer will look for a server in the local datacenter.
+// This function may return a nil value if no server is available.
+func (r *Router) FindLANServer() *metadata.Server {
+    _, srv := r.FindLANRoute()
+    return srv
+}
+
 // findDirectRoute looks for a route to the given datacenter if it's directly
 // adjacent to the server.
 func (r *Router) findDirectRoute(datacenter string) (*Manager, *metadata.Server, bool) {
@@ -432,6 +473,24 @@ func (r *Router) HasDatacenter(dc string) bool {
     return ok
 }
 
+// GetLANManager returns the Manager for the LAN area and the local datacenter
+func (r *Router) GetLANManager() *Manager {
+    r.RLock()
+    defer r.RUnlock()
+
+    area, ok := r.areas[types.AreaLAN]
+    if !ok {
+        return nil
+    }
+
+    managerInfo, ok := area.managers[r.localDatacenter]
+    if !ok {
+        return nil
+    }
+
+    return managerInfo.manager
+}
+
 // datacenterSorter takes a list of DC names and a parallel vector of distances
 // and implements sort.Interface, keeping both structures coherent and sorting
 // by distance.
@@ -15,6 +15,8 @@ import (
     "github.com/hashicorp/consul/types"
     "github.com/hashicorp/serf/coordinate"
     "github.com/hashicorp/serf/serf"
+
+    "github.com/stretchr/testify/require"
 )
 
 type mockCluster struct {
@@ -69,6 +71,26 @@ func (m *mockCluster) AddMember(dc string, name string, coord *coordinate.Coordi
     m.addr++
 }
 
+func (m *mockCluster) AddLANMember(dc, name, role string, coord *coordinate.Coordinate) {
+    member := serf.Member{
+        Name: name,
+        Addr: net.ParseIP(fmt.Sprintf("127.0.0.%d", m.addr)),
+        Port: 8300,
+        Tags: map[string]string{
+            "dc":    dc,
+            "role":  role,
+            "port":  "8300",
+            "build": "0.8.0",
+            "vsn":   "3",
+        },
+    }
+    m.members = append(m.members, member)
+    if coord != nil {
+        m.coords[member.Name] = coord
+    }
+    m.addr++
+}
+
 // testCluster is used to generate a single WAN-like area with a known set of
 // member and RTT topology.
 //
@@ -487,3 +509,22 @@ func TestRouter_GetDatacenterMaps(t *testing.T) {
         }
     }
 }
+
+func TestRouter_FindLANServer(t *testing.T) {
+    r := testRouter(t, "dc0")
+
+    lan := newMockCluster("node4.dc0")
+    lan.AddLANMember("dc0", "node0", "consul", lib.GenerateCoordinate(10*time.Millisecond))
+    lan.AddLANMember("dc0", "node1", "", lib.GenerateCoordinate(20*time.Millisecond))
+    lan.AddLANMember("dc0", "node2", "", lib.GenerateCoordinate(21*time.Millisecond))
+
+    require.NoError(t, r.AddArea(types.AreaLAN, lan, &fauxConnPool{}))
+
+    srv := r.FindLANServer()
+    require.NotNil(t, srv)
+    require.Equal(t, "127.0.0.1:8300", srv.Addr.String())
+
+    mgr, srv2 := r.FindLANRoute()
+    require.NotNil(t, mgr)
+    require.Equal(t, srv, srv2)
+}
@@ -13,6 +13,7 @@ import (
     certmon "github.com/hashicorp/consul/agent/cert-monitor"
     "github.com/hashicorp/consul/agent/config"
     "github.com/hashicorp/consul/agent/pool"
+    "github.com/hashicorp/consul/agent/router"
     "github.com/hashicorp/consul/agent/structs"
     "github.com/hashicorp/consul/agent/token"
     "github.com/hashicorp/consul/ipaddr"
@@ -35,6 +36,7 @@ type BaseDeps struct {
     Cache      *cache.Cache
     AutoConfig *autoconf.AutoConfig // TODO: use an interface
     ConnPool   *pool.ConnPool       // TODO: use an interface
+    Router     *router.Router
 }
 
 // MetricsHandler provides an http.Handler for displaying metrics.
@@ -86,6 +88,8 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error)
 
     deferredAC := &deferredAutoConfig{}
 
+    d.Router = router.NewRouter(d.Logger, cfg.Datacenter, fmt.Sprintf("%s.%s", cfg.NodeName, cfg.Datacenter))
+
     cmConf := new(certmon.Config).
         WithCache(d.Cache).
         WithTLSConfigurator(d.TLSConfigurator).
@@ -7,3 +7,7 @@ type AreaID string
 // This represents the existing WAN area that's built in to Consul. Consul
 // Enterprise generalizes areas, which are represented with UUIDs.
 const AreaWAN AreaID = "wan"
+
+// This represents the existing LAN area that's built in to Consul. Consul
+// Enterprise generalizes areas, which are represented with UUIDs.
+const AreaLAN AreaID = "lan"