// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package router

import (
	"fmt"
	"sort"
	"sync"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/serf/coordinate"
	"github.com/hashicorp/serf/serf"

	"github.com/hashicorp/consul/agent/metadata"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/internal/gossip/librtt"
	"github.com/hashicorp/consul/logging"
	"github.com/hashicorp/consul/types"
)

// Router keeps track of a set of network areas and their associated Serf
// membership of Consul servers. It then indexes this by datacenter to provide
// healthy routes to servers by datacenter.
type Router struct {
	// logger is used for diagnostic output.
	logger hclog.Logger

	// localDatacenter has the name of the router's home datacenter. This is
	// used to short-circuit RTT calculations for local servers.
	localDatacenter string

	// serverName has the name of the router's server. This is used to
	// short-circuit pinging to itself.
	serverName string

	// areas maps area IDs to structures holding information about that
	// area.
	areas map[types.AreaID]*areaInfo

	// managers is an index from datacenter names to a list of server
	// managers for that datacenter. This is used to quickly look up routes.
	managers map[string][]*Manager

	// routeFn is a hook to actually do the routing.
	routeFn func(datacenter string) (*Manager, *metadata.Server, bool)

	// grpcServerTracker is used to balance gRPC connections across servers,
	// and has callbacks for adding or removing a server.
	grpcServerTracker ServerTracker

	// isShutdown prevents adding new areas to a router after it has been
	// shut down.
	isShutdown bool

	// This top-level lock covers all the internal state.
	sync.RWMutex
}

// RouterSerfCluster is an interface wrapper around Serf in order to make this
// easier to unit test.
type RouterSerfCluster interface {
	NumNodes() int
	Members() []serf.Member
	GetCoordinate() (*coordinate.Coordinate, error)
	GetCachedCoordinate(name string) (*coordinate.Coordinate, bool)
}

// managerInfo holds a server manager for a datacenter along with its associated
// shutdown channel.
type managerInfo struct {
	// manager is notified about servers for this datacenter.
	manager *Manager

	// shutdownCh is only given to this manager so we can shut it down when
	// all servers for this datacenter are gone.
	shutdownCh chan struct{}
}

// areaInfo holds information about a given network area.
type areaInfo struct {
	// cluster is the Serf instance for this network area.
	cluster RouterSerfCluster

	// pinger is used to ping servers in this network area when trying to
	// find a new, healthy server to talk to.
	pinger Pinger

	// managers maps datacenter names to managers for that datacenter in
	// this area.
	managers map[string]*managerInfo

	// useTLS specifies whether to use TLS to communicate for this network area.
	useTLS bool
}

// NewRouter returns a new Router with the given configuration.
func NewRouter(logger hclog.Logger, localDatacenter, serverName string, tracker ServerTracker) *Router {
	if logger == nil {
		logger = hclog.New(&hclog.LoggerOptions{})
	}
	if tracker == nil {
		tracker = NoOpServerTracker{}
	}

	router := &Router{
		logger:            logger.Named(logging.Router),
		localDatacenter:   localDatacenter,
		serverName:        serverName,
		areas:             make(map[types.AreaID]*areaInfo),
		managers:          make(map[string][]*Manager),
		grpcServerTracker: tracker,
	}

	// Hook the direct route lookup by default.
	router.routeFn = router.findDirectRoute

	return router
}
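
// A minimal usage sketch (hypothetical, not part of the original source): a
// server typically builds one Router, registers its Serf clusters as areas,
// and then asks it for routes. The wanCluster and pinger values below are
// assumed to be a Serf instance satisfying RouterSerfCluster and a Pinger
// implementation; passing a nil tracker falls back to NoOpServerTracker.
//
//	r := NewRouter(logger, "dc1", "server-1.dc1", nil)
//	if err := r.AddArea(types.AreaWAN, wanCluster, pinger); err != nil {
//		logger.Error("failed to add WAN area", "error", err)
//	}
//	if manager, server, ok := r.FindRoute("dc2"); ok {
//		// Use manager/server to issue an RPC to dc2.
//		_ = manager
//		_ = server
//	}
//	defer r.Shutdown()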

// Shutdown removes all areas from the router, which stops all their respective
// managers. No new areas can be added after the router is shut down.
func (r *Router) Shutdown() {
	r.Lock()
	defer r.Unlock()

	for areaID, area := range r.areas {
		for datacenter, info := range area.managers {
			r.removeManagerFromIndex(datacenter, info.manager)
			close(info.shutdownCh)
		}

		delete(r.areas, areaID)
	}

	r.isShutdown = true
}

// AddArea registers a new network area with the router.
func (r *Router) AddArea(areaID types.AreaID, cluster RouterSerfCluster, pinger Pinger) error {
	r.Lock()
	defer r.Unlock()

	if r.isShutdown {
		return fmt.Errorf("cannot add area, router is shut down")
	}

	if _, ok := r.areas[areaID]; ok {
		return fmt.Errorf("area ID %q already exists", areaID)
	}

	area := &areaInfo{
		cluster:  cluster,
		pinger:   pinger,
		managers: make(map[string]*managerInfo),
	}
	r.areas[areaID] = area

	// always ensure we have a started manager for the LAN area
	if areaID == types.AreaLAN {
		r.logger.Info("Initializing LAN area manager")
		r.maybeInitializeManager(area, r.localDatacenter)
	}

	// Do an initial populate of the manager so that we don't have to wait
	// for events to fire. This lets us attempt to use all the known servers
	// initially, and then will quickly detect that they are failed if we
	// can't reach them.
	for _, m := range cluster.Members() {
		ok, parts := metadata.IsConsulServer(m)
		if !ok {
			if areaID != types.AreaLAN {
				r.logger.Warn("Non-server in server-only area",
					"non_server", m.Name,
					"area", areaID,
				)
			}
			continue
		}

		if err := r.addServer(areaID, area, parts); err != nil {
			return fmt.Errorf("failed to add server %q to area %q: %v", m.Name, areaID, err)
		}
	}

	return nil
}

// GetServerMetadataByAddr returns server metadata by dc and address. If
// nothing is found, nil is returned.
func (r *Router) GetServerMetadataByAddr(dc, addr string) *metadata.Server {
	r.RLock()
	defer r.RUnlock()
	if ms, ok := r.managers[dc]; ok {
		for _, m := range ms {
			for _, s := range m.getServerList().servers {
				if s.Addr.String() == addr {
					return s
				}
			}
		}
	}
	return nil
}

// removeManagerFromIndex does cleanup to take a manager out of the index of
// datacenters. This assumes the lock is already held for writing, and will
// panic if the given manager isn't found.
func (r *Router) removeManagerFromIndex(datacenter string, manager *Manager) {
	managers := r.managers[datacenter]
	for i := 0; i < len(managers); i++ {
		if managers[i] == manager {
			r.managers[datacenter] = append(managers[:i], managers[i+1:]...)
			if len(r.managers[datacenter]) == 0 {
				delete(r.managers, datacenter)
			}
			return
		}
	}
	panic("managers index out of sync")
}

// TLSEnabled returns whether TLS is enabled for the given area ID.
func (r *Router) TLSEnabled(areaID types.AreaID) (bool, error) {
	r.RLock()
	defer r.RUnlock()

	area, ok := r.areas[areaID]
	if !ok {
		return false, fmt.Errorf("area ID %q does not exist", areaID)
	}

	return area.useTLS, nil
}

// RemoveArea removes an existing network area from the router.
func (r *Router) RemoveArea(areaID types.AreaID) error {
	r.Lock()
	defer r.Unlock()

	area, ok := r.areas[areaID]
	if !ok {
		return fmt.Errorf("area ID %q does not exist", areaID)
	}

	// Remove all of this area's managers from the index and shut them down.
	for datacenter, info := range area.managers {
		r.removeManagerFromIndex(datacenter, info.manager)
		close(info.shutdownCh)
	}

	delete(r.areas, areaID)
	return nil
}

// maybeInitializeManager will initialize a new manager for the given area/dc
// if it's not already created. Calling this function should only be done if
// holding a write lock on the Router.
func (r *Router) maybeInitializeManager(area *areaInfo, dc string) *Manager {
	info, ok := area.managers[dc]
	if ok {
		return info.manager
	}

	shutdownCh := make(chan struct{})
	rb := r.grpcServerTracker.NewRebalancer(dc)
	manager := New(r.logger, shutdownCh, area.cluster, area.pinger, r.serverName, rb)
	info = &managerInfo{
		manager:    manager,
		shutdownCh: shutdownCh,
	}
	area.managers[dc] = info

	managers := r.managers[dc]
	r.managers[dc] = append(managers, manager)
	go manager.Run()

	return manager
}

// addServer does the work of AddServer once the write lock is held.
func (r *Router) addServer(areaID types.AreaID, area *areaInfo, s *metadata.Server) error {
	// Make the manager on the fly if this is the first we've seen of it,
	// and add it to the index.
	manager := r.maybeInitializeManager(area, s.Datacenter)

	// If TLS is enabled for the area, set it on the server so the manager
	// knows to use TLS when pinging it.
	if area.useTLS {
		s.UseTLS = true
	}

	manager.AddServer(s)
	r.grpcServerTracker.AddServer(areaID, s)
	return nil
}

// AddServer should be called whenever a new server joins an area. This is
// typically hooked into the Serf event handler for this area.
func (r *Router) AddServer(areaID types.AreaID, s *metadata.Server) error {
	r.Lock()
	defer r.Unlock()

	area, ok := r.areas[areaID]
	if !ok {
		return fmt.Errorf("area ID %q does not exist", areaID)
	}
	return r.addServer(areaID, area, s)
}
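
// A hypothetical sketch of how AddServer, RemoveServer, and FailServer are
// typically driven from Serf member events; the handleAreaEvent helper and
// the areaID value here are assumptions for illustration, not part of this
// package:
//
//	func handleAreaEvent(r *Router, areaID types.AreaID, e serf.Event) {
//		switch me := e.(type) {
//		case serf.MemberEvent:
//			for _, m := range me.Members {
//				ok, parts := metadata.IsConsulServer(m)
//				if !ok {
//					continue
//				}
//				switch me.EventType() {
//				case serf.EventMemberJoin:
//					_ = r.AddServer(areaID, parts)
//				case serf.EventMemberFailed:
//					_ = r.FailServer(areaID, parts)
//				case serf.EventMemberLeave, serf.EventMemberReap:
//					_ = r.RemoveServer(areaID, parts)
//				}
//			}
//		}
//	}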

// RemoveServer should be called whenever a server is removed from an area. This
// is typically hooked into the Serf event handler for this area.
func (r *Router) RemoveServer(areaID types.AreaID, s *metadata.Server) error {
	r.Lock()
	defer r.Unlock()

	area, ok := r.areas[areaID]
	if !ok {
		return fmt.Errorf("area ID %q does not exist", areaID)
	}

	// If the manager has already been removed we just quietly exit. This
	// can get called by Serf events, so the timing isn't totally
	// deterministic.
	info, ok := area.managers[s.Datacenter]
	if !ok {
		return nil
	}
	info.manager.RemoveServer(s)
	r.grpcServerTracker.RemoveServer(areaID, s)

	// If this manager is empty then remove it so we don't accumulate cruft
	// and waste time during request routing.
	if num := info.manager.NumServers(); num == 0 {
		r.removeManagerFromIndex(s.Datacenter, info.manager)
		close(info.shutdownCh)
		delete(area.managers, s.Datacenter)
	}

	return nil
}

// FailServer should be called whenever a server is failed in an area. This
// is typically hooked into the Serf event handler for this area. We will
// immediately shift traffic away from this server, but it will remain in the
// list of servers.
func (r *Router) FailServer(areaID types.AreaID, s *metadata.Server) error {
	r.RLock()
	defer r.RUnlock()

	area, ok := r.areas[areaID]
	if !ok {
		return fmt.Errorf("area ID %q does not exist", areaID)
	}

	// If the manager has already been removed we just quietly exit. This
	// can get called by Serf events, so the timing isn't totally
	// deterministic.
	info, ok := area.managers[s.Datacenter]
	if !ok {
		return nil
	}

	info.manager.NotifyFailedServer(s)
	return nil
}

// FindRoute returns a healthy server with a route to the given datacenter. The
// Boolean return parameter will indicate if a server was available. In some
// cases this may return a best-effort unhealthy server that can be used for a
// connection attempt. If any problem occurs with the given server, the caller
// should feed that back to the manager associated with the server, which is
// also returned, by calling NotifyFailedServer().
func (r *Router) FindRoute(datacenter string) (*Manager, *metadata.Server, bool) {
	return r.routeFn(datacenter)
}
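
// A minimal sketch of the FindRoute contract (hypothetical caller code; the
// forwardRPC helper, args, and reply are assumptions for illustration): on an
// RPC error, the failure is fed back to the returned manager so traffic
// shifts away from that server.
//
//	manager, server, ok := r.FindRoute("dc2")
//	if !ok {
//		return fmt.Errorf("no route to datacenter %q", "dc2")
//	}
//	if err := forwardRPC(server, args, reply); err != nil {
//		manager.NotifyFailedServer(server)
//		return err
//	}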

// FindLANRoute returns a healthy server within the local datacenter. In some
// cases this may return a best-effort unhealthy server that can be used for a
// connection attempt. If any problem occurs with the given server, the caller
// should feed that back to the manager associated with the server, which is
// also returned, by calling NotifyFailedServer().
func (r *Router) FindLANRoute() (*Manager, *metadata.Server) {
	mgr := r.GetLANManager()

	if mgr == nil {
		return nil, nil
	}

	return mgr, mgr.FindServer()
}

// FindLANServer will look for a server in the local datacenter.
// This function may return a nil value if no server is available.
func (r *Router) FindLANServer() *metadata.Server {
	_, srv := r.FindLANRoute()
	return srv
}

// findDirectRoute looks for a route to the given datacenter if it's directly
// adjacent to the server.
func (r *Router) findDirectRoute(datacenter string) (*Manager, *metadata.Server, bool) {
	r.RLock()
	defer r.RUnlock()

	// Get the list of managers for this datacenter. This will usually just
	// have one entry, but it's possible to have a user-defined area + WAN.
	managers, ok := r.managers[datacenter]
	if !ok {
		return nil, nil, false
	}

	// Try each manager until we get a server.
	for _, manager := range managers {
		if manager.IsOffline() {
			continue
		}

		if s := manager.FindServer(); s != nil {
			return manager, s, true
		}
	}

	// Didn't find a route (even via an unhealthy server).
	return nil, nil, false
}

// CheckServers walks all known servers in the given datacenter and calls fn
// for each one. The fn should return a bool indicating whether iteration
// should continue; returning false stops the checks immediately.
func (r *Router) CheckServers(dc string, fn func(srv *metadata.Server) bool) {
	r.RLock()
	defer r.RUnlock()

	managers, ok := r.managers[dc]
	if !ok {
		return
	}

	for _, m := range managers {
		if !m.checkServers(fn) {
			return
		}
	}
}
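
// A small sketch of how CheckServers can be used (hypothetical caller code)
// to collect the addresses of all known servers in a datacenter:
//
//	var addrs []string
//	r.CheckServers("dc1", func(srv *metadata.Server) bool {
//		addrs = append(addrs, srv.Addr.String())
//		return true // keep iterating
//	})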

// GetDatacenters returns a list of datacenters known to the router, sorted by
// name.
func (r *Router) GetDatacenters() []string {
	r.RLock()
	defer r.RUnlock()

	dcs := make([]string, 0, len(r.managers))
	for dc := range r.managers {
		dcs = append(dcs, dc)
	}

	sort.Strings(dcs)
	return dcs
}

// GetRemoteDatacenters returns a list of remote datacenters known to the
// router, sorted by name.
func (r *Router) GetRemoteDatacenters(local string) []string {
	r.RLock()
	defer r.RUnlock()

	dcs := make([]string, 0, len(r.managers))
	for dc := range r.managers {
		if dc == local {
			continue
		}
		dcs = append(dcs, dc)
	}

	sort.Strings(dcs)
	return dcs
}

// HasDatacenter checks whether the given datacenter is known to the router.
func (r *Router) HasDatacenter(dc string) bool {
	r.RLock()
	defer r.RUnlock()
	_, ok := r.managers[dc]
	return ok
}

// GetLANManager returns the Manager for the LAN area and the local datacenter.
func (r *Router) GetLANManager() *Manager {
	r.RLock()
	defer r.RUnlock()

	area, ok := r.areas[types.AreaLAN]
	if !ok {
		return nil
	}

	managerInfo, ok := area.managers[r.localDatacenter]
	if !ok {
		return nil
	}

	return managerInfo.manager
}

// datacenterSorter takes a list of DC names and a parallel vector of distances
// and implements sort.Interface, keeping both structures coherent and sorting
// by distance.
type datacenterSorter struct {
	Names []string
	Vec   []float64
}

// See sort.Interface.
func (n *datacenterSorter) Len() int {
	return len(n.Names)
}

// See sort.Interface.
func (n *datacenterSorter) Swap(i, j int) {
	n.Names[i], n.Names[j] = n.Names[j], n.Names[i]
	n.Vec[i], n.Vec[j] = n.Vec[j], n.Vec[i]
}

// See sort.Interface.
func (n *datacenterSorter) Less(i, j int) bool {
	return n.Vec[i] < n.Vec[j]
}
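
// A brief illustration (hypothetical values) of how the sorter is used below:
// names are pre-sorted alphabetically, then sort.Stable orders them by RTT
// while preserving the alphabetical order for equal distances.
//
//	names := []string{"dc1", "dc2", "dc3"}
//	rtts := []float64{0.05, 0.01, 0.05}
//	sort.Stable(&datacenterSorter{names, rtts})
//	// names is now ["dc2", "dc1", "dc3"]; dc1 precedes dc3 because their
//	// distances tie and the input was sorted by name.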

// GetDatacentersByDistance returns a list of datacenters known to the router,
// sorted by median RTT from this server to the servers in each datacenter. If
// there are multiple areas that reach a given datacenter, this will use the
// lowest RTT for the sort.
func (r *Router) GetDatacentersByDistance() ([]string, error) {
	r.RLock()
	defer r.RUnlock()

	// Go through each area and aggregate the median RTT from the current
	// server to the other servers in each datacenter.
	dcs := make(map[string]float64)
	for areaID, info := range r.areas {
		index := make(map[string][]float64)
		coord, err := info.cluster.GetCoordinate()
		if err != nil {
			return nil, err
		}

		for _, m := range info.cluster.Members() {
			ok, parts := metadata.IsConsulServer(m)
			if !ok {
				if areaID != types.AreaLAN {
					r.logger.Warn("Non-server in server-only area",
						"non_server", m.Name,
						"area", areaID,
						"func", "GetDatacentersByDistance",
					)
				}
				continue
			}

			if m.Status == serf.StatusLeft {
				r.logger.Debug("server in area left, skipping",
					"server", m.Name,
					"area", areaID,
					"func", "GetDatacentersByDistance",
				)
				continue
			}

			existing := index[parts.Datacenter]
			if parts.Datacenter == r.localDatacenter {
				// Everything in the local datacenter looks like zero RTT.
				index[parts.Datacenter] = append(existing, 0.0)
			} else {
				// It's OK to get a nil coordinate back, ComputeDistance
				// will put the RTT at positive infinity.
				other, _ := info.cluster.GetCachedCoordinate(parts.Name)
				rtt := librtt.ComputeDistance(coord, other)
				index[parts.Datacenter] = append(existing, rtt)
			}
		}

		// Compute the median RTT between this server and the servers
		// in each datacenter. We accumulate the lowest RTT to each DC
		// in the master map, since a given DC might appear in multiple
		// areas.
		for dc, rtts := range index {
			sort.Float64s(rtts)
			rtt := rtts[len(rtts)/2]

			current, ok := dcs[dc]
			if !ok || (ok && rtt < current) {
				dcs[dc] = rtt
			}
		}
	}

	// First sort by DC name, since we do a stable sort later.
	names := make([]string, 0, len(dcs))
	for dc := range dcs {
		names = append(names, dc)
	}
	sort.Strings(names)

	// Then stable sort by median RTT.
	rtts := make([]float64, 0, len(dcs))
	for _, dc := range names {
		rtts = append(rtts, dcs[dc])
	}
	sort.Stable(&datacenterSorter{names, rtts})
	return names, nil
}

// GetDatacenterMaps returns a structure with the raw network coordinates of
// each known server, organized by datacenter and network area.
func (r *Router) GetDatacenterMaps() ([]structs.DatacenterMap, error) {
	r.RLock()
	defer r.RUnlock()

	var maps []structs.DatacenterMap
	for areaID, info := range r.areas {
		index := make(map[string]structs.Coordinates)
		for _, m := range info.cluster.Members() {
			ok, parts := metadata.IsConsulServer(m)
			if !ok {
				if areaID != types.AreaLAN {
					r.logger.Warn("Non-server in server-only area",
						"non_server", m.Name,
						"area", areaID,
						"func", "GetDatacenterMaps",
					)
				}
				continue
			}

			if m.Status == serf.StatusLeft {
				r.logger.Debug("server in area left, skipping",
					"server", m.Name,
					"area", areaID,
					"func", "GetDatacenterMaps",
				)
				continue
			}

			coord, ok := info.cluster.GetCachedCoordinate(parts.Name)
			if ok {
				entry := &structs.Coordinate{
					Node:  parts.Name,
					Coord: coord,
				}
				existing := index[parts.Datacenter]
				index[parts.Datacenter] = append(existing, entry)
			}
		}

		for dc, coords := range index {
			entry := structs.DatacenterMap{
				Datacenter:  dc,
				AreaID:      areaID,
				Coordinates: coords,
			}
			maps = append(maps, entry)
		}
	}
	return maps, nil
}
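
// A short sketch (hypothetical caller code) of consuming the result: each
// entry pairs a datacenter and area with the raw coordinates of its servers.
//
//	maps, err := r.GetDatacenterMaps()
//	if err != nil {
//		return err
//	}
//	for _, dm := range maps {
//		r.logger.Debug("datacenter map",
//			"datacenter", dm.Datacenter,
//			"area", dm.AreaID,
//			"servers", len(dm.Coordinates),
//		)
//	}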