Commit 1358609742 by Sarah Alsmiller, 2022-07-14 11:24:39 -05:00
273 changed files with 11448 additions and 8964 deletions

.changelog/13677.txt Normal file
View File

@ -0,0 +1,4 @@
```release-note:feature
cli: Add a new flag to `config delete` for deleting a config entry specified in a
valid config file, e.g., `config delete -filename intention-allow.hcl`
```
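A rough illustration of what the new flag amounts to: the CLI reads the kind and name out of the given file and deletes that entry. The sketch below performs the same delete through Consul's Go API client; it is not necessarily the CLI's code path, and the entry name is hypothetical.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to a local agent with default settings (assumes one is running).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Deleting the entry that intention-allow.hcl would describe, by kind and
	// name. The name "intention-allow" here is illustrative only.
	if _, err := client.ConfigEntries().Delete(api.ServiceIntentions, "intention-allow", nil); err != nil {
		log.Fatal(err)
	}
}
```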

.changelog/13686.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:enhancement
ui: Add new CopyableCode component and use it in certain pre-existing areas
```

.changelog/13687.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:feature
server: Broadcast the public gRPC port using LAN Serf, and update the consul service in the catalog with the same data
```
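Both halves of this mechanism appear later in this commit (setupSerfConfig sets the tag; handleAliveMember copies it into the catalog). A standalone sketch of the round trip, using plain maps in place of Serf and the catalog:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Server side: advertise the configured gRPC port as a LAN serf tag
	// (mirrors setupSerfConfig in this commit).
	grpcPort := 8502
	tags := map[string]string{}
	if grpcPort > 0 {
		tags["grpc_port"] = fmt.Sprintf("%d", grpcPort)
	}

	// Leader side: copy the tag into the consul service's catalog meta, but
	// only when it parses as a positive integer (mirrors handleAliveMember).
	serviceMeta := map[string]string{}
	grpcPortStr := tags["grpc_port"]
	if v, err := strconv.Atoi(grpcPortStr); err == nil && v > 0 {
		serviceMeta["grpc_port"] = grpcPortStr
	}

	fmt.Println(serviceMeta) // map[grpc_port:8502]
}
```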

.changelog/13699.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
xds: Fix a bug where terminating gateway upstream clusters weren't configured properly when the service protocol was `http2`.
```

View File

@ -34,7 +34,7 @@ references:
ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers
ubuntu: &UBUNTU_CI_IMAGE ubuntu-2004:202201-02
cache:
yarn: &YARN_CACHE_KEY consul-ui-v8-{{ checksum "ui/yarn.lock" }}
yarn: &YARN_CACHE_KEY consul-ui-v9-{{ checksum "ui/yarn.lock" }}
steps:
install-gotestsum: &install-gotestsum
@ -241,7 +241,9 @@ jobs:
- run:
name: Install protobuf
command: make proto-tools
- run:
name: "Protobuf Format"
command: make proto-format
- run:
command: make --always-make proto
- run: |
@ -249,7 +251,7 @@ jobs:
echo "Generated code was not updated correctly"
exit 1
fi
- run:
name: "Protobuf Lint"
command: make proto-lint

View File

@ -1,3 +1,41 @@
## 1.12.3 (July 13, 2022)

IMPROVEMENTS:

* Support Vault namespaces in Connect CA by adding RootPKINamespace and
  IntermediatePKINamespace fields to the config. [[GH-12904](https://github.com/hashicorp/consul/issues/12904)]
* connect: Update Envoy support matrix to latest patch releases (1.22.2, 1.21.3, 1.20.4, 1.19.5) [[GH-13431](https://github.com/hashicorp/consul/issues/13431)]
* dns: Added support for specifying admin partition in node lookups. [[GH-13421](https://github.com/hashicorp/consul/issues/13421)]
* telemetry: Added a `consul.server.isLeader` metric to track if a server is a leader or not. [[GH-13304](https://github.com/hashicorp/consul/issues/13304)]

BUG FIXES:

* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13256](https://github.com/hashicorp/consul/issues/13256)]
* deps: Update go-grpc/grpc, resolving connection memory leak [[GH-13051](https://github.com/hashicorp/consul/issues/13051)]
* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]
* ui: Fix incorrect text on certain page empty states [[GH-13409](https://github.com/hashicorp/consul/issues/13409)]
* xds: Fix a bug that resulted in Lambda services not using the payload-passthrough option as expected. [[GH-13607](https://github.com/hashicorp/consul/issues/13607)]
* xds: Fix a bug where terminating gateway upstream clusters weren't configured properly when the service protocol was `http2`. [[GH-13699](https://github.com/hashicorp/consul/issues/13699)]

## 1.11.7 (July 13, 2022)

IMPROVEMENTS:

* connect: Update supported Envoy versions to 1.20.4, 1.19.5, 1.18.6, 1.17.4 [[GH-13434](https://github.com/hashicorp/consul/issues/13434)]

BUG FIXES:

* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13265](https://github.com/hashicorp/consul/issues/13265)]
* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]
* xds: Fix a bug where terminating gateway upstream clusters weren't configured properly when the service protocol was `http2`. [[GH-13699](https://github.com/hashicorp/consul/issues/13699)]

## 1.10.12 (July 13, 2022)

BUG FIXES:

* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13264](https://github.com/hashicorp/consul/issues/13264)]
* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]

## 1.13.0-alpha2 (June 21, 2022)

IMPROVEMENTS:

View File

@ -376,6 +376,18 @@ proto-format: proto-tools
proto-lint: proto-tools
@buf lint --config proto/buf.yaml --path proto
@buf lint --config proto-public/buf.yaml --path proto-public
@# Enforce that every proto package (except two legacy exceptions) uses the hashicorp.consul.internal prefix.
@for fn in $$(find proto -name '*.proto'); do \
if [[ "$$fn" = "proto/pbsubscribe/subscribe.proto" ]]; then \
continue ; \
elif [[ "$$fn" = "proto/pbpartition/partition.proto" ]]; then \
continue ; \
fi ; \
pkg=$$(grep "^package " "$$fn" | sed 's/^package \(.*\);/\1/'); \
if [[ "$$pkg" != hashicorp.consul.internal.* ]]; then \
echo "ERROR: $$fn: is missing 'hashicorp.consul.internal' package prefix: $$pkg" >&2; \
exit 1; \
fi \
done
# utility to echo a makefile variable (i.e. 'make print-PROTOC_VERSION')
print-% : ; @echo $($*)
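For readers who find the shell loop above dense, here is the same package-prefix check sketched as a standalone Go program; this is illustrative only, not part of the commit (the exempted files and prefix are taken from the recipe above):

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"regexp"
	"strings"
)

// pkgRe extracts the package name from a .proto file.
var pkgRe = regexp.MustCompile(`(?m)^package\s+([^;\s]+)\s*;`)

func main() {
	exempt := map[string]bool{
		"proto/pbsubscribe/subscribe.proto": true,
		"proto/pbpartition/partition.proto": true,
	}
	err := filepath.WalkDir("proto", func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil || d.IsDir() || filepath.Ext(path) != ".proto" || exempt[path] {
			return walkErr
		}
		data, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		m := pkgRe.FindSubmatch(data)
		if m == nil || !strings.HasPrefix(string(m[1]), "hashicorp.consul.internal.") {
			return fmt.Errorf("%s: is missing 'hashicorp.consul.internal' package prefix", path)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "ERROR:", err)
		os.Exit(1)
	}
}
```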

View File

@ -38,7 +38,7 @@ import (
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/dns"
publicgrpc "github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/proxycfg"
proxycfgglue "github.com/hashicorp/consul/agent/proxycfg-glue"
@ -213,9 +213,9 @@ type Agent struct {
// depending on the configuration
delegate delegate
// publicGRPCServer is the gRPC server exposed on the dedicated gRPC port (as
// externalGRPCServer is the gRPC server exposed on the dedicated gRPC port (as
// opposed to the multiplexed "server" port).
publicGRPCServer *grpc.Server
externalGRPCServer *grpc.Server
// state stores a local representation of the node,
// services and checks. Used for anti-entropy.
@ -539,7 +539,7 @@ func (a *Agent) Start(ctx context.Context) error {
// This needs to happen after the initial auto-config is loaded, because TLS
// can only be configured on the gRPC server at the point of creation.
a.buildPublicGRPCServer()
a.buildExternalGRPCServer()
if err := a.startLicenseManager(ctx); err != nil {
return err
@ -578,7 +578,7 @@ func (a *Agent) Start(ctx context.Context) error {
// Setup either the client or the server.
if c.ServerMode {
server, err := consul.NewServer(consulCfg, a.baseDeps.Deps, a.publicGRPCServer)
server, err := consul.NewServer(consulCfg, a.baseDeps.Deps, a.externalGRPCServer)
if err != nil {
return fmt.Errorf("Failed to start Consul server: %v", err)
}
@ -760,13 +760,13 @@ func (a *Agent) Failed() <-chan struct{} {
return a.apiServers.failed
}
func (a *Agent) buildPublicGRPCServer() {
func (a *Agent) buildExternalGRPCServer() {
// TLS is only enabled on the gRPC server if there's an HTTPS port configured.
var tls *tlsutil.Configurator
if a.config.HTTPSPort > 0 {
tls = a.tlsConfigurator
}
a.publicGRPCServer = publicgrpc.NewServer(a.logger.Named("grpc.public"), tls)
a.externalGRPCServer = external.NewServer(a.logger.Named("grpc.external"), tls)
}
func (a *Agent) listenAndServeGRPC() error {
@ -803,7 +803,7 @@ func (a *Agent) listenAndServeGRPC() error {
},
a,
)
a.xdsServer.Register(a.publicGRPCServer)
a.xdsServer.Register(a.externalGRPCServer)
ln, err := a.startListeners(a.config.GRPCAddrs)
if err != nil {
@ -816,7 +816,7 @@ func (a *Agent) listenAndServeGRPC() error {
"address", innerL.Addr().String(),
"network", innerL.Addr().Network(),
)
err := a.publicGRPCServer.Serve(innerL)
err := a.externalGRPCServer.Serve(innerL)
if err != nil {
a.logger.Error("gRPC server failed", "error", err)
}
@ -1193,6 +1193,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
cfg.RPCAddr = runtimeCfg.RPCBindAddr
cfg.RPCAdvertise = runtimeCfg.RPCAdvertiseAddr
cfg.GRPCPort = runtimeCfg.GRPCPort
cfg.Segment = runtimeCfg.SegmentName
if len(runtimeCfg.Segments) > 0 {
segments, err := segmentConfig(runtimeCfg)
@ -1492,7 +1494,7 @@ func (a *Agent) ShutdownAgent() error {
}
// Stop gRPC
a.publicGRPCServer.Stop()
a.externalGRPCServer.Stop()
// Stop the proxy config manager
if a.proxyConfig != nil {
@ -4111,6 +4113,8 @@ func (a *Agent) registerCache() {
a.cache.RegisterType(cachetype.TrustBundleListName, &cachetype.TrustBundles{Client: a.rpcClientPeering})
a.cache.RegisterType(cachetype.PeeredUpstreamsName, &cachetype.PeeredUpstreams{RPC: a})
a.registerEntCache()
}

View File

@ -18,8 +18,8 @@ import (
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
"github.com/hashicorp/consul/agent/consul/stream"
grpc "github.com/hashicorp/consul/agent/grpc/private"
"github.com/hashicorp/consul/agent/grpc/private/resolver"
grpc "github.com/hashicorp/consul/agent/grpc-internal"
"github.com/hashicorp/consul/agent/grpc-internal/resolver"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/router"
"github.com/hashicorp/consul/agent/rpc/middleware"

View File

@ -130,6 +130,9 @@ type Config struct {
// RPCSrcAddr is the source address for outgoing RPC connections.
RPCSrcAddr *net.TCPAddr
// GRPCPort is the port the public gRPC server listens on.
GRPCPort int
// (Enterprise-only) The network segment this agent is part of.
Segment string

View File

@ -9,7 +9,7 @@ import (
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/authmethod/testauth"
"github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/structs"
tokenStore "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/proto-public/pbacl"
@ -26,7 +26,7 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
// correctly wiring everything up in the server by:
//
// * Starting a cluster with multiple servers.
// * Making a request to a follower's public gRPC port.
// * Making a request to a follower's external gRPC port.
// * Ensuring that the request is correctly forwarded to the leader.
// * Ensuring we get a valid certificate back (so it went through the CAManager).
server1, conn1, _ := testGRPCIntegrationServer(t, func(c *Config) {
@ -59,7 +59,7 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
ctx = public.ContextWithToken(ctx, TestDefaultInitialManagementToken)
ctx = external.ContextWithToken(ctx, TestDefaultInitialManagementToken)
// This would fail if it wasn't forwarded to the leader.
rsp, err := client.Sign(ctx, &pbconnectca.SignRequest{
@ -96,7 +96,7 @@ func TestGRPCIntegration_ServerDiscovery_WatchServers(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
ctx = public.ContextWithToken(ctx, TestDefaultInitialManagementToken)
ctx = external.ContextWithToken(ctx, TestDefaultInitialManagementToken)
serverStream, err := client.WatchServers(ctx, &pbserverdiscovery.WatchServersRequest{Wan: false})
require.NoError(t, err)

View File

@ -1069,6 +1069,11 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri
},
}
grpcPortStr := member.Tags["grpc_port"]
if v, err := strconv.Atoi(grpcPortStr); err == nil && v > 0 {
service.Meta["grpc_port"] = grpcPortStr
}
// Attempt to join the consul server
if err := s.joinConsulServer(member, parts); err != nil {
return err

View File

@ -6,7 +6,6 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
@ -18,12 +17,12 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/rpc/peering"
"github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
)
func (s *Server) startPeeringStreamSync(ctx context.Context) {
@ -86,7 +85,7 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
// 3. accept new stream for [D]
// 4. list peerings [A,B,C,D]
// 5. terminate []
connectedStreams := s.peeringService.ConnectedStreams()
connectedStreams := s.peerStreamServer.ConnectedStreams()
state := s.fsm.State()
@ -132,7 +131,7 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
continue
}
status, found := s.peeringService.StreamStatus(peer.ID)
status, found := s.peerStreamServer.StreamStatus(peer.ID)
// TODO(peering): If there is new peering data and a connected stream, should we tear down the stream?
// If the data in the updated token is bad, the user wouldn't know until the old servers/certs become invalid.
@ -161,7 +160,7 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
}
}
logger.Trace("checking connected streams", "streams", s.peeringService.ConnectedStreams(), "sequence_id", seq)
logger.Trace("checking connected streams", "streams", s.peerStreamServer.ConnectedStreams(), "sequence_id", seq)
// Clean up active streams of peerings that were deleted from the state store.
// TODO(peering): This is going to trigger shutting down peerings we generated a token for. Is that OK?
@ -239,7 +238,6 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
logger.Trace("dialing peer", "addr", addr)
conn, err := grpc.DialContext(retryCtx, addr,
grpc.WithContextDialer(newPeerDialer(addr)),
grpc.WithBlock(),
tlsOption,
)
@ -248,24 +246,28 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
}
defer conn.Close()
client := pbpeering.NewPeeringServiceClient(conn)
client := pbpeerstream.NewPeerStreamServiceClient(conn)
stream, err := client.StreamResources(retryCtx)
if err != nil {
return err
}
streamReq := peering.HandleStreamRequest{
if peer.PeerID == "" {
return fmt.Errorf("expected PeerID to be non empty; the wrong end of peering is being dialed")
}
streamReq := peerstream.HandleStreamRequest{
LocalID: peer.ID,
RemoteID: peer.PeerID,
PeerName: peer.Name,
Partition: peer.Partition,
Stream: stream,
}
err = s.peeringService.HandleStream(streamReq)
err = s.peerStreamServer.HandleStream(streamReq)
// A nil error indicates that the peering was deleted and the stream needs to be gracefully shut down.
if err == nil {
stream.CloseSend()
s.peeringService.DrainStream(streamReq)
s.peerStreamServer.DrainStream(streamReq)
// This will cancel the retry-er context, letting us break out of this loop when we want to shut down the stream.
cancel()
@ -283,26 +285,6 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
return nil
}
func newPeerDialer(peerAddr string) func(context.Context, string) (net.Conn, error) {
return func(ctx context.Context, addr string) (net.Conn, error) {
d := net.Dialer{}
conn, err := d.DialContext(ctx, "tcp", peerAddr)
if err != nil {
return nil, err
}
// TODO(peering): This is going to need to be revisited. This type uses the TLS settings configured on the agent, but
// for peering we never want mutual TLS because the client peer doesn't share its CA cert.
_, err = conn.Write([]byte{byte(pool.RPCGRPC)})
if err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
}
func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
}

View File

@ -59,7 +59,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
// S1 should not have a stream tracked for dc2 because s1 generated a token for baz, and therefore needs to wait to be dialed.
time.Sleep(1 * time.Second)
_, found := s1.peeringService.StreamStatus(token.PeerID)
_, found := s1.peerStreamServer.StreamStatus(token.PeerID)
require.False(t, found)
var (
@ -90,7 +90,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
require.NoError(t, s2.fsm.State().PeeringWrite(1000, p))
retry.Run(t, func(r *retry.R) {
status, found := s2.peeringService.StreamStatus(p.ID)
status, found := s2.peerStreamServer.StreamStatus(p.ID)
require.True(r, found)
require.True(r, status.Connected)
})
@ -105,7 +105,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
s2.logger.Trace("deleted peering for my-peer-s1")
retry.Run(t, func(r *retry.R) {
_, found := s2.peeringService.StreamStatus(p.ID)
_, found := s2.peerStreamServer.StreamStatus(p.ID)
require.False(r, found)
})
@ -186,7 +186,7 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
require.NoError(t, s2.fsm.State().PeeringWrite(1000, p))
retry.Run(t, func(r *retry.R) {
status, found := s2.peeringService.StreamStatus(p.ID)
status, found := s2.peerStreamServer.StreamStatus(p.ID)
require.True(r, found)
require.True(r, status.Connected)
})
@ -201,7 +201,7 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
s2.logger.Trace("deleted peering for my-peer-s1")
retry.Run(t, func(r *retry.R) {
_, found := s1.peeringService.StreamStatus(p.PeerID)
_, found := s1.peerStreamServer.StreamStatus(p.PeerID)
require.False(r, found)
})

View File

@ -7,51 +7,56 @@ import (
"strconv"
"sync"
"google.golang.org/grpc"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
"github.com/hashicorp/consul/agent/rpc/peering"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
type peeringBackend struct {
type PeeringBackend struct {
// TODO(peering): accept a smaller interface; maybe just funcs from the server that we actually need: DC, IsLeader, etc
srv *Server
connPool GRPCClientConner
apply *peeringApply
addr *leaderAddr
srv *Server
leaderAddrLock sync.RWMutex
leaderAddr string
}
var _ peering.Backend = (*peeringBackend)(nil)
var _ peering.Backend = (*PeeringBackend)(nil)
var _ peerstream.Backend = (*PeeringBackend)(nil)
// NewPeeringBackend returns a peering.Backend implementation that is bound to the given server.
func NewPeeringBackend(srv *Server, connPool GRPCClientConner) peering.Backend {
return &peeringBackend{
srv: srv,
connPool: connPool,
apply: &peeringApply{srv: srv},
addr: &leaderAddr{},
func NewPeeringBackend(srv *Server) *PeeringBackend {
return &PeeringBackend{
srv: srv,
}
}
// Forward should not be used to initiate forwarding over bidirectional streams
func (b *peeringBackend) Forward(info structs.RPCInfo, f func(*grpc.ClientConn) error) (handled bool, err error) {
// Only forward the request if the dc in the request matches the server's datacenter.
if info.RequestDatacenter() != "" && info.RequestDatacenter() != b.srv.config.Datacenter {
return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters")
}
return b.srv.ForwardGRPC(b.connPool, info, f)
// SetLeaderAddress is called on a raft.LeaderObservation in a goroutine
// in the consul server; see trackLeaderChanges()
func (b *PeeringBackend) SetLeaderAddress(addr string) {
b.leaderAddrLock.Lock()
b.leaderAddr = addr
b.leaderAddrLock.Unlock()
}
// GetLeaderAddress provides the best hint for the current address of the
// leader. There is no guarantee that this is the actual address of the
// leader.
func (b *PeeringBackend) GetLeaderAddress() string {
b.leaderAddrLock.RLock()
defer b.leaderAddrLock.RUnlock()
return b.leaderAddr
}
// GetAgentCACertificates gets the server's raw CA data from its TLS Configurator.
func (b *peeringBackend) GetAgentCACertificates() ([]string, error) {
func (b *PeeringBackend) GetAgentCACertificates() ([]string, error) {
// TODO(peering): handle empty CA pems
return b.srv.tlsConfigurator.ManualCAPems(), nil
}
// GetServerAddresses looks up server node addresses from the state store.
func (b *peeringBackend) GetServerAddresses() ([]string, error) {
func (b *PeeringBackend) GetServerAddresses() ([]string, error) {
state := b.srv.fsm.State()
_, nodes, err := state.ServiceNodes(nil, "consul", structs.DefaultEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
if err != nil {
@ -59,19 +64,26 @@ func (b *peeringBackend) GetServerAddresses() ([]string, error) {
}
var addrs []string
for _, node := range nodes {
addrs = append(addrs, node.Address+":"+strconv.Itoa(node.ServicePort))
grpcPortStr := node.ServiceMeta["grpc_port"]
if v, err := strconv.Atoi(grpcPortStr); err != nil || v < 1 {
continue // skip server that isn't exporting public gRPC properly
}
addrs = append(addrs, node.Address+":"+grpcPortStr)
}
if len(addrs) == 0 {
return nil, fmt.Errorf("a grpc bind port must be specified in the configuration for all servers")
}
return addrs, nil
}
// GetServerName returns the SNI to be returned in the peering token data which
// will be used by peers when establishing peering connections over TLS.
func (b *peeringBackend) GetServerName() string {
func (b *PeeringBackend) GetServerName() string {
return b.srv.tlsConfigurator.ServerSNI(b.srv.config.Datacenter, "")
}
// EncodeToken encodes a peering token as a base64-encoded representation of JSON (for now).
func (b *peeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) {
func (b *PeeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) {
jsonToken, err := json.Marshal(tok)
if err != nil {
return nil, fmt.Errorf("failed to marshal token: %w", err)
@ -80,7 +92,7 @@ func (b *peeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error)
}
// DecodeToken decodes a peering token from a base64-encoded JSON byte array (for now).
func (b *peeringBackend) DecodeToken(tokRaw []byte) (*structs.PeeringToken, error) {
func (b *PeeringBackend) DecodeToken(tokRaw []byte) (*structs.PeeringToken, error) {
tokJSONRaw, err := base64.StdEncoding.DecodeString(string(tokRaw))
if err != nil {
return nil, fmt.Errorf("failed to decode token: %w", err)
@ -92,59 +104,28 @@ func (b *peeringBackend) DecodeToken(tokRaw []byte) (*structs.PeeringToken, erro
return &tok, nil
}
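As the comments note, the token format is just JSON wrapped in base64. A self-contained round trip of that scheme (the token struct here is an illustrative stand-in, not the real structs.PeeringToken):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// token is a stand-in with a PeeringToken-like shape, for illustration only.
type token struct {
	CA              []string
	ServerAddresses []string
	PeerID          string
}

func main() {
	tok := token{ServerAddresses: []string{"10.0.0.1:8502"}, PeerID: "p-1234"}

	// Encode: marshal to JSON, then base64 (the EncodeToken direction).
	j, err := json.Marshal(tok)
	if err != nil {
		panic(err)
	}
	encoded := base64.StdEncoding.EncodeToString(j)

	// Decode: base64-decode, then unmarshal (the DecodeToken direction).
	raw, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	var out token
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.PeerID) // p-1234
}
```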
func (s peeringBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) {
func (s *PeeringBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) {
return s.srv.publisher.Subscribe(req)
}
func (b *peeringBackend) Store() peering.Store {
func (b *PeeringBackend) Store() peering.Store {
return b.srv.fsm.State()
}
func (b *peeringBackend) Apply() peering.Apply {
return b.apply
}
func (b *peeringBackend) LeaderAddress() peering.LeaderAddress {
return b.addr
}
func (b *peeringBackend) EnterpriseCheckPartitions(partition string) error {
func (b *PeeringBackend) EnterpriseCheckPartitions(partition string) error {
return b.enterpriseCheckPartitions(partition)
}
func (b *peeringBackend) EnterpriseCheckNamespaces(namespace string) error {
func (b *PeeringBackend) EnterpriseCheckNamespaces(namespace string) error {
return b.enterpriseCheckNamespaces(namespace)
}
func (b *peeringBackend) IsLeader() bool {
func (b *PeeringBackend) IsLeader() bool {
return b.srv.IsLeader()
}
type leaderAddr struct {
lock sync.RWMutex
leaderAddr string
}
func (m *leaderAddr) Set(addr string) {
m.lock.Lock()
defer m.lock.Unlock()
m.leaderAddr = addr
}
func (m *leaderAddr) Get() string {
m.lock.RLock()
defer m.lock.RUnlock()
return m.leaderAddr
}
type peeringApply struct {
srv *Server
}
func (a *peeringApply) CheckPeeringUUID(id string) (bool, error) {
state := a.srv.fsm.State()
func (b *PeeringBackend) CheckPeeringUUID(id string) (bool, error) {
state := b.srv.fsm.State()
if _, existing, err := state.PeeringReadByID(nil, id); err != nil {
return false, err
} else if existing != nil {
@ -154,31 +135,28 @@ func (a *peeringApply) CheckPeeringUUID(id string) (bool, error) {
return true, nil
}
func (a *peeringApply) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
_, err := a.srv.raftApplyProtobuf(structs.PeeringWriteType, req)
func (b *PeeringBackend) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
_, err := b.srv.raftApplyProtobuf(structs.PeeringWriteType, req)
return err
}
// TODO(peering): This needs RPC metrics interceptor since it's not triggered by an RPC.
func (a *peeringApply) PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error {
_, err := a.srv.raftApplyProtobuf(structs.PeeringTerminateByIDType, req)
func (b *PeeringBackend) PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error {
_, err := b.srv.raftApplyProtobuf(structs.PeeringTerminateByIDType, req)
return err
}
func (a *peeringApply) PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error {
_, err := a.srv.raftApplyProtobuf(structs.PeeringTrustBundleWriteType, req)
func (b *PeeringBackend) PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error {
_, err := b.srv.raftApplyProtobuf(structs.PeeringTrustBundleWriteType, req)
return err
}
func (a *peeringApply) CatalogRegister(req *structs.RegisterRequest) error {
_, err := a.srv.leaderRaftApply("Catalog.Register", structs.RegisterRequestType, req)
func (b *PeeringBackend) CatalogRegister(req *structs.RegisterRequest) error {
_, err := b.srv.leaderRaftApply("Catalog.Register", structs.RegisterRequestType, req)
return err
}
func (a *peeringApply) CatalogDeregister(req *structs.DeregisterRequest) error {
_, err := a.srv.leaderRaftApply("Catalog.Deregister", structs.DeregisterRequestType, req)
func (b *PeeringBackend) CatalogDeregister(req *structs.DeregisterRequest) error {
_, err := b.srv.leaderRaftApply("Catalog.Deregister", structs.DeregisterRequestType, req)
return err
}
var _ peering.Apply = (*peeringApply)(nil)
var _ peering.LeaderAddress = (*leaderAddr)(nil)

View File

@ -8,14 +8,14 @@ import (
"strings"
)
func (b *peeringBackend) enterpriseCheckPartitions(partition string) error {
func (b *PeeringBackend) enterpriseCheckPartitions(partition string) error {
if partition == "" || strings.EqualFold(partition, "default") {
return nil
}
return fmt.Errorf("Partitions are a Consul Enterprise feature")
}
func (b *peeringBackend) enterpriseCheckNamespaces(namespace string) error {
func (b *PeeringBackend) enterpriseCheckNamespaces(namespace string) error {
if namespace == "" || strings.EqualFold(namespace, "default") {
return nil
}

View File

@ -32,7 +32,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/state"
agent_grpc "github.com/hashicorp/consul/agent/grpc/private"
agent_grpc "github.com/hashicorp/consul/agent/grpc-internal"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/structs"
tokenStore "github.com/hashicorp/consul/agent/token"

View File

@ -39,12 +39,13 @@ import (
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/consul/usagemetrics"
"github.com/hashicorp/consul/agent/consul/wanfed"
agentgrpc "github.com/hashicorp/consul/agent/grpc/private"
"github.com/hashicorp/consul/agent/grpc/private/services/subscribe"
aclgrpc "github.com/hashicorp/consul/agent/grpc/public/services/acl"
"github.com/hashicorp/consul/agent/grpc/public/services/connectca"
"github.com/hashicorp/consul/agent/grpc/public/services/dataplane"
"github.com/hashicorp/consul/agent/grpc/public/services/serverdiscovery"
aclgrpc "github.com/hashicorp/consul/agent/grpc-external/services/acl"
"github.com/hashicorp/consul/agent/grpc-external/services/connectca"
"github.com/hashicorp/consul/agent/grpc-external/services/dataplane"
"github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
"github.com/hashicorp/consul/agent/grpc-external/services/serverdiscovery"
agentgrpc "github.com/hashicorp/consul/agent/grpc-internal"
"github.com/hashicorp/consul/agent/grpc-internal/services/subscribe"
"github.com/hashicorp/consul/agent/metadata"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/router"
@ -55,7 +56,6 @@ import (
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/lib/routine"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbsubscribe"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/consul/types"
@ -242,19 +242,19 @@ type Server struct {
// is only ever closed.
leaveCh chan struct{}
// publicACLServer serves the ACL service exposed on the public gRPC port.
// It is also exposed on the private multiplexed "server" port to enable
// externalACLServer serves the ACL service exposed on the external gRPC port.
// It is also exposed on the internal multiplexed "server" port to enable
// RPC forwarding.
publicACLServer *aclgrpc.Server
externalACLServer *aclgrpc.Server
// publicConnectCAServer serves the Connect CA service exposed on the public
// gRPC port. It is also exposed on the private multiplexed "server" port to
// externalConnectCAServer serves the Connect CA service exposed on the external
// gRPC port. It is also exposed on the internal multiplexed "server" port to
// enable RPC forwarding.
publicConnectCAServer *connectca.Server
externalConnectCAServer *connectca.Server
// publicGRPCServer is the gRPC server exposed on the dedicated gRPC port, as
// externalGRPCServer is the gRPC server exposed on the dedicated gRPC port, as
// opposed to the multiplexed "server" port which is served by grpcHandler.
publicGRPCServer *grpc.Server
externalGRPCServer *grpc.Server
// router is used to map out Consul servers in the WAN and in Consul
// Enterprise user-defined areas.
@ -364,8 +364,13 @@ type Server struct {
// this into the Deps struct and created it much earlier on.
publisher *stream.EventPublisher
// peering is a service used to handle peering streams.
peeringService *peering.Service
// peeringBackend is shared between the external and internal gRPC services for peering
peeringBackend *PeeringBackend
// peerStreamServer is a server used to handle peering streams
peerStreamServer *peerstream.Server
peeringServer *peering.Server
peerStreamTracker *peerstream.Tracker
// embedded struct to hold all the enterprise specific data
EnterpriseServer
@ -379,7 +384,7 @@ type connHandler interface {
// NewServer is used to construct a new Consul server from the configuration
// and extra options, potentially returning an error.
func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Server, error) {
func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Server, error) {
logger := flat.Logger
if err := config.CheckProtocolVersion(); err != nil {
return nil, err
@ -425,7 +430,7 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve
reconcileCh: make(chan serf.Member, reconcileChSize),
router: flat.Router,
tlsConfigurator: flat.TLSConfigurator,
publicGRPCServer: publicGRPCServer,
externalGRPCServer: externalGRPCServer,
reassertLeaderCh: make(chan chan error),
sessionTimers: NewSessionTimers(),
tombstoneGC: gc,
@ -672,8 +677,8 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve
s.overviewManager = NewOverviewManager(s.logger, s.fsm, s.config.MetricsReportingInterval)
go s.overviewManager.Run(&lib.StopChannelContext{StopCh: s.shutdownCh})
// Initialize public gRPC server - register services on public gRPC server.
s.publicACLServer = aclgrpc.NewServer(aclgrpc.Config{
// Initialize external gRPC server - register services on external gRPC server.
s.externalACLServer = aclgrpc.NewServer(aclgrpc.Config{
ACLsEnabled: s.config.ACLsEnabled,
ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) {
return s.ForwardGRPC(s.grpcConnPool, info, fn)
@ -689,9 +694,9 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve
PrimaryDatacenter: s.config.PrimaryDatacenter,
ValidateEnterpriseRequest: s.validateEnterpriseRequest,
})
s.publicACLServer.Register(s.publicGRPCServer)
s.externalACLServer.Register(s.externalGRPCServer)
s.publicConnectCAServer = connectca.NewServer(connectca.Config{
s.externalConnectCAServer = connectca.NewServer(connectca.Config{
Publisher: s.publisher,
GetStore: func() connectca.StateStore { return s.FSM().State() },
Logger: logger.Named("grpc-api.connect-ca"),
@ -702,24 +707,37 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve
},
ConnectEnabled: s.config.ConnectEnabled,
})
s.publicConnectCAServer.Register(s.publicGRPCServer)
s.externalConnectCAServer.Register(s.externalGRPCServer)
dataplane.NewServer(dataplane.Config{
GetStore: func() dataplane.StateStore { return s.FSM().State() },
Logger: logger.Named("grpc-api.dataplane"),
ACLResolver: s.ACLResolver,
Datacenter: s.config.Datacenter,
}).Register(s.publicGRPCServer)
}).Register(s.externalGRPCServer)
serverdiscovery.NewServer(serverdiscovery.Config{
Publisher: s.publisher,
ACLResolver: s.ACLResolver,
Logger: logger.Named("grpc-api.server-discovery"),
}).Register(s.publicGRPCServer)
}).Register(s.externalGRPCServer)
// Initialize private gRPC server.
s.peerStreamTracker = peerstream.NewTracker()
s.peeringBackend = NewPeeringBackend(s)
s.peerStreamServer = peerstream.NewServer(peerstream.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
GetStore: func() peerstream.StateStore { return s.FSM().State() },
Logger: logger.Named("grpc-api.peerstream"),
ACLResolver: s.ACLResolver,
Datacenter: s.config.Datacenter,
ConnectEnabled: s.config.ConnectEnabled,
})
s.peerStreamServer.Register(s.externalGRPCServer)
// Initialize internal gRPC server.
//
// Note: some "public" gRPC services are also exposed on the private gRPC server
// Note: some "external" gRPC services are also exposed on the internal gRPC server
// to enable RPC forwarding.
s.grpcHandler = newGRPCHandlerFromConfig(flat, config, s)
s.grpcLeaderForwarder = flat.LeaderForwarder
@ -757,15 +775,25 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve
}
func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler {
p := peering.NewService(
deps.Logger.Named("grpc-api.peering"),
peering.Config{
Datacenter: config.Datacenter,
ConnectEnabled: config.ConnectEnabled,
if s.peeringBackend == nil {
panic("peeringBackend is required during construction")
}
p := peering.NewServer(peering.Config{
Backend: s.peeringBackend,
Tracker: s.peerStreamTracker,
Logger: deps.Logger.Named("grpc-api.peering"),
ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) {
// Only forward the request if the dc in the request matches the server's datacenter.
if info.RequestDatacenter() != "" && info.RequestDatacenter() != config.Datacenter {
return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters")
}
return s.ForwardGRPC(s.grpcConnPool, info, fn)
},
NewPeeringBackend(s, deps.GRPCConnPool),
)
s.peeringService = p
Datacenter: config.Datacenter,
ConnectEnabled: config.ConnectEnabled,
})
s.peeringServer = p
register := func(srv *grpc.Server) {
if config.RPCConfig.EnableStreaming {
@ -773,13 +801,13 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler
&subscribeBackend{srv: s, connPool: deps.GRPCConnPool},
deps.Logger.Named("grpc-api.subscription")))
}
pbpeering.RegisterPeeringServiceServer(srv, s.peeringService)
s.peeringServer.Register(srv)
s.registerEnterpriseGRPCServices(deps, srv)
// Note: these public gRPC services are also exposed on the private server to
// Note: these external gRPC services are also exposed on the internal server to
// enable RPC forwarding.
s.publicACLServer.Register(srv)
s.publicConnectCAServer.Register(srv)
s.externalACLServer.Register(srv)
s.externalConnectCAServer.Register(srv)
}
return agentgrpc.NewHandler(deps.Logger, config.RPCAddr, register)
@ -1658,7 +1686,7 @@ func (s *Server) trackLeaderChanges() {
}
s.grpcLeaderForwarder.UpdateLeaderAddr(s.config.Datacenter, string(leaderObs.LeaderAddr))
s.peeringService.Backend.LeaderAddress().Set(string(leaderObs.LeaderAddr))
s.peeringBackend.SetLeaderAddress(string(leaderObs.LeaderAddr))
case <-s.shutdownCh:
s.raft.DeregisterObserver(observer)
return

View File

@ -103,6 +103,9 @@ func (s *Server) setupSerfConfig(opts setupSerfOptions) (*serf.Config, error) {
conf.Tags["build"] = s.config.Build
addr := opts.Listener.Addr().(*net.TCPAddr)
conf.Tags["port"] = fmt.Sprintf("%d", addr.Port)
if s.config.GRPCPort > 0 {
conf.Tags["grpc_port"] = fmt.Sprintf("%d", s.config.GRPCPort)
}
if s.config.Bootstrap {
conf.Tags["bootstrap"] = "1"
}

View File

@ -15,12 +15,12 @@ import (
"github.com/armon/go-metrics"
"github.com/google/tcpproxy"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/memberlist"
"github.com/hashicorp/raft"
"google.golang.org/grpc"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"github.com/hashicorp/consul-net-rpc/net/rpc"
@ -36,8 +36,6 @@ import (
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/consul/types"
"github.com/stretchr/testify/require"
)
const (
@ -111,7 +109,7 @@ func testServerConfig(t *testing.T) (string, *Config) {
dir := testutil.TempDir(t, "consul")
config := DefaultConfig()
ports := freeport.GetN(t, 3)
ports := freeport.GetN(t, 4) // {server, serf_lan, serf_wan, grpc}
config.NodeName = uniqueNodeName(t.Name())
config.Bootstrap = true
config.Datacenter = "dc1"
@ -167,6 +165,8 @@ func testServerConfig(t *testing.T) (string, *Config) {
// looks like several depend on it.
config.RPCHoldTimeout = 10 * time.Second
config.GRPCPort = ports[3]
config.ConnectEnabled = true
config.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
@ -239,6 +239,19 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *S
})
t.Cleanup(func() { srv.Shutdown() })
if srv.config.GRPCPort > 0 {
// Normally the gRPC server listener is created at the agent level and
// passed down into the Server creation.
externalGRPCAddr := fmt.Sprintf("127.0.0.1:%d", srv.config.GRPCPort)
ln, err := net.Listen("tcp", externalGRPCAddr)
require.NoError(t, err)
go func() {
_ = srv.externalGRPCServer.Serve(ln)
}()
t.Cleanup(srv.externalGRPCServer.Stop)
}
return dir, srv
}
@ -262,16 +275,8 @@ func testACLServerWithConfig(t *testing.T, cb func(*Config), initReplicationToke
func testGRPCIntegrationServer(t *testing.T, cb func(*Config)) (*Server, *grpc.ClientConn, rpc.ClientCodec) {
_, srv, codec := testACLServerWithConfig(t, cb, false)
// Normally the gRPC server listener is created at the agent level and passed down into
// the Server creation. For our tests, we need to ensure
ln, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
go func() {
_ = srv.publicGRPCServer.Serve(ln)
}()
t.Cleanup(srv.publicGRPCServer.Stop)
conn, err := grpc.Dial(ln.Addr().String(), grpc.WithInsecure())
grpcAddr := fmt.Sprintf("127.0.0.1:%d", srv.config.GRPCPort)
conn, err := grpc.Dial(grpcAddr, grpc.WithInsecure())
require.NoError(t, err)
t.Cleanup(func() { _ = conn.Close() })
@ -1992,7 +1997,7 @@ func TestServer_Peering_LeadershipCheck(t *testing.T) {
// the actual tests
// when leadership has been established s2 should have the address of s1
// in the peering service
peeringLeaderAddr := s2.peeringService.Backend.LeaderAddress().Get()
peeringLeaderAddr := s2.peeringBackend.GetLeaderAddress()
require.Equal(t, s1.config.RPCAddr.String(), peeringLeaderAddr)
// test corollary by transitivity to future-proof against any setup bugs

View File

@ -138,7 +138,12 @@ func (s *Store) ConfigEntriesByKind(ws memdb.WatchSet, kind string, entMeta *acl
return configEntriesByKindTxn(tx, ws, kind, entMeta)
}
func listDiscoveryChainNamesTxn(tx ReadTxn, ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []structs.ServiceName, error) {
func listDiscoveryChainNamesTxn(
tx ReadTxn,
ws memdb.WatchSet,
overrides map[configentry.KindName]structs.ConfigEntry,
entMeta acl.EnterpriseMeta,
) (uint64, []structs.ServiceName, error) {
// Get the index and watch for updates
idx := maxIndexWatchTxn(tx, ws, tableConfigEntries)
@ -160,6 +165,15 @@ func listDiscoveryChainNamesTxn(tx ReadTxn, ws memdb.WatchSet, entMeta acl.Enter
sn := structs.NewServiceName(entry.GetName(), entry.GetEnterpriseMeta())
seen[sn] = struct{}{}
}
for kn, entry := range overrides {
sn := structs.NewServiceName(kn.Name, &kn.EnterpriseMeta)
if entry != nil {
seen[sn] = struct{}{}
} else {
delete(seen, sn)
}
}
}
results := maps.SliceOfKeys(seen)
@ -506,7 +520,7 @@ var serviceGraphKinds = []string{
// discoveryChainTargets will return a list of services listed as a target for the input's discovery chain
func (s *Store) discoveryChainTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, service string, entMeta *acl.EnterpriseMeta) (uint64, []structs.ServiceName, error) {
idx, targets, err := s.discoveryChainOriginalTargetsTxn(tx, ws, dc, service, entMeta)
idx, targets, err := discoveryChainOriginalTargetsTxn(tx, ws, dc, service, entMeta)
if err != nil {
return 0, nil, err
}
@ -524,7 +538,12 @@ func (s *Store) discoveryChainTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, serv
return idx, resp, nil
}
func (s *Store) discoveryChainOriginalTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, service string, entMeta *acl.EnterpriseMeta) (uint64, []*structs.DiscoveryTarget, error) {
func discoveryChainOriginalTargetsTxn(
tx ReadTxn,
ws memdb.WatchSet,
dc, service string,
entMeta *acl.EnterpriseMeta,
) (uint64, []*structs.DiscoveryTarget, error) {
source := structs.NewServiceName(service, entMeta)
req := discoverychain.CompileRequest{
ServiceName: source.Name,
@ -532,7 +551,7 @@ func (s *Store) discoveryChainOriginalTargetsTxn(tx ReadTxn, ws memdb.WatchSet,
EvaluateInPartition: source.PartitionOrDefault(),
EvaluateInDatacenter: dc,
}
idx, chain, _, err := s.serviceDiscoveryChainTxn(tx, ws, source.Name, entMeta, req)
idx, chain, _, err := serviceDiscoveryChainTxn(tx, ws, source.Name, entMeta, req)
if err != nil {
return 0, nil, fmt.Errorf("failed to fetch discovery chain for %q: %v", source.String(), err)
}
@ -579,7 +598,7 @@ func (s *Store) discoveryChainSourcesTxn(tx ReadTxn, ws memdb.WatchSet, dc strin
EvaluateInPartition: sn.PartitionOrDefault(),
EvaluateInDatacenter: dc,
}
idx, chain, _, err := s.serviceDiscoveryChainTxn(tx, ws, sn.Name, &sn.EnterpriseMeta, req)
idx, chain, _, err := serviceDiscoveryChainTxn(tx, ws, sn.Name, &sn.EnterpriseMeta, req)
if err != nil {
return 0, nil, fmt.Errorf("failed to fetch discovery chain for %q: %v", sn.String(), err)
}
@ -620,7 +639,28 @@ func validateProposedConfigEntryInServiceGraph(
wildcardEntMeta := kindName.WithWildcardNamespace()
switch kindName.Kind {
case structs.ExportedServices, structs.MeshConfig:
case structs.ExportedServices:
// This is the case for deleting a config entry
if newEntry == nil {
return nil
}
entry := newEntry.(*structs.ExportedServicesConfigEntry)
_, serviceList, err := listServicesExportedToAnyPeerByConfigEntry(nil, tx, entry, nil)
if err != nil {
return err
}
for _, sn := range serviceList {
if err := validateChainIsPeerExportSafe(tx, sn, nil); err != nil {
return err
}
}
return nil
case structs.MeshConfig:
// Exported services and mesh config do not influence discovery chains.
return nil
@ -759,16 +799,59 @@ func validateProposedConfigEntryInServiceGraph(
}
var (
svcProtocols = make(map[structs.ServiceID]string)
svcTopNodeType = make(map[structs.ServiceID]string)
svcProtocols = make(map[structs.ServiceID]string)
svcTopNodeType = make(map[structs.ServiceID]string)
exportedServicesByPartition = make(map[string]map[structs.ServiceName]struct{})
)
for chain := range checkChains {
protocol, topNode, err := testCompileDiscoveryChain(tx, chain.ID, overrides, &chain.EnterpriseMeta)
protocol, topNode, newTargets, err := testCompileDiscoveryChain(tx, chain.ID, overrides, &chain.EnterpriseMeta)
if err != nil {
return err
}
svcProtocols[chain] = protocol
svcTopNodeType[chain] = topNode.Type
chainSvc := structs.NewServiceName(chain.ID, &chain.EnterpriseMeta)
// Validate that we aren't adding a cross-datacenter or cross-partition
// reference to a peer-exported service's discovery chain by this pending
// edit.
partition := chain.PartitionOrDefault()
exportedServices, ok := exportedServicesByPartition[partition]
if !ok {
entMeta := structs.NodeEnterpriseMetaInPartition(partition)
_, exportedServices, err = listAllExportedServices(nil, tx, overrides, *entMeta)
if err != nil {
return err
}
exportedServicesByPartition[partition] = exportedServices
}
if _, exported := exportedServices[chainSvc]; exported {
if err := validateChainIsPeerExportSafe(tx, chainSvc, overrides); err != nil {
return err
}
// If a TCP (L4) discovery chain is peer exported we have to take
// care to prohibit certain edits to service-resolvers.
if !structs.IsProtocolHTTPLike(protocol) {
_, _, oldTargets, err := testCompileDiscoveryChain(tx, chain.ID, nil, &chain.EnterpriseMeta)
if err != nil {
return fmt.Errorf("error compiling current discovery chain for %q: %w", chainSvc, err)
}
// Ensure that you can't introduce any new targets that would
// produce a new SpiffeID for this L4 service.
oldSpiffeIDs := convertTargetsToTestSpiffeIDs(oldTargets)
newSpiffeIDs := convertTargetsToTestSpiffeIDs(newTargets)
for id, targetID := range newSpiffeIDs {
if _, exists := oldSpiffeIDs[id]; !exists {
return fmt.Errorf("peer exported service %q uses protocol=%q and cannot introduce new discovery chain targets like %q",
chainSvc, protocol, targetID,
)
}
}
}
}
}
// Now validate all of our ingress gateways.
@ -828,18 +911,84 @@ func validateProposedConfigEntryInServiceGraph(
return nil
}
func validateChainIsPeerExportSafe(
tx ReadTxn,
exportedSvc structs.ServiceName,
overrides map[configentry.KindName]structs.ConfigEntry,
) error {
_, chainEntries, err := readDiscoveryChainConfigEntriesTxn(tx, nil, exportedSvc.Name, overrides, &exportedSvc.EnterpriseMeta)
if err != nil {
return fmt.Errorf("error reading discovery chain for %q during config entry validation: %w", exportedSvc, err)
}
emptyOrMatchesEntryPartition := func(entry structs.ConfigEntry, found string) bool {
if found == "" {
return true
}
return acl.EqualPartitions(entry.GetEnterpriseMeta().PartitionOrEmpty(), found)
}
for _, e := range chainEntries.Routers {
for _, route := range e.Routes {
if route.Destination == nil {
continue
}
if !emptyOrMatchesEntryPartition(e, route.Destination.Partition) {
return fmt.Errorf("peer exported service %q contains cross-partition route destination", exportedSvc)
}
}
}
for _, e := range chainEntries.Splitters {
for _, split := range e.Splits {
if !emptyOrMatchesEntryPartition(e, split.Partition) {
return fmt.Errorf("peer exported service %q contains cross-partition split destination", exportedSvc)
}
}
}
for _, e := range chainEntries.Resolvers {
if e.Redirect != nil {
if e.Redirect.Datacenter != "" {
return fmt.Errorf("peer exported service %q contains cross-datacenter resolver redirect", exportedSvc)
}
if !emptyOrMatchesEntryPartition(e, e.Redirect.Partition) {
return fmt.Errorf("peer exported service %q contains cross-partition resolver redirect", exportedSvc)
}
}
if e.Failover != nil {
for _, failover := range e.Failover {
if len(failover.Datacenters) > 0 {
return fmt.Errorf("peer exported service %q contains cross-datacenter failover", exportedSvc)
}
}
}
}
return nil
}
// testCompileDiscoveryChain speculatively compiles a discovery chain with
// pending modifications to see if it would be valid. Also returns the computed
// protocol and topmost discovery chain node.
//
// If provided, the overrides map will service reads of specific config entries
// instead of the state store if the config entry kind name is present in the
// map. A nil in the map implies that the config entry should be tombstoned
// during evaluation and treated as erased.
//
// The override map lets us speculatively compile a discovery chain to see if
// doing so would error, so we can ultimately block config entry writes from
// happening.
func testCompileDiscoveryChain(
tx ReadTxn,
chainName string,
overrides map[configentry.KindName]structs.ConfigEntry,
entMeta *acl.EnterpriseMeta,
) (string, *structs.DiscoveryGraphNode, error) {
) (string, *structs.DiscoveryGraphNode, map[string]*structs.DiscoveryTarget, error) {
_, speculativeEntries, err := readDiscoveryChainConfigEntriesTxn(tx, nil, chainName, overrides, entMeta)
if err != nil {
return "", nil, err
return "", nil, nil, err
}
// Note we use an arbitrary namespace and datacenter as those would not
@ -856,10 +1005,10 @@ func testCompileDiscoveryChain(
}
chain, err := discoverychain.Compile(req)
if err != nil {
return "", nil, err
return "", nil, nil, err
}
return chain.Protocol, chain.Nodes[chain.StartNode], nil
return chain.Protocol, chain.Nodes[chain.StartNode], chain.Targets, nil
}
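The override-map convention documented above (a present key shadows the state store; a nil value acts as a tombstone) generalizes beyond config entries. A minimal standalone sketch of the pattern, with illustrative types rather than the store's actual ones:

```go
package main

import "fmt"

type kindName struct{ Kind, Name string }

// lookup consults overrides first: a present key wins even when its value is
// nil (nil meaning "treat the entry as deleted for this speculative read");
// otherwise it falls back to the committed store.
func lookup(store map[kindName]string, overrides map[kindName]*string, kn kindName) (string, bool) {
	if v, ok := overrides[kn]; ok {
		if v == nil {
			return "", false // tombstoned during speculative evaluation
		}
		return *v, true
	}
	v, ok := store[kn]
	return v, ok
}

func main() {
	store := map[kindName]string{
		{Kind: "service-resolver", Name: "main"}: "stored",
		{Kind: "service-splitter", Name: "main"}: "stored",
	}
	proposed := "proposed"
	overrides := map[kindName]*string{
		{Kind: "service-resolver", Name: "main"}: &proposed, // speculative edit
		{Kind: "service-splitter", Name: "main"}: nil,       // speculative delete
	}

	fmt.Println(lookup(store, overrides, kindName{Kind: "service-resolver", Name: "main"}))
	fmt.Println(lookup(store, overrides, kindName{Kind: "service-splitter", Name: "main"}))
}
```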
func (s *Store) ServiceDiscoveryChain(
@ -871,10 +1020,10 @@ func (s *Store) ServiceDiscoveryChain(
tx := s.db.ReadTxn()
defer tx.Abort()
return s.serviceDiscoveryChainTxn(tx, ws, serviceName, entMeta, req)
return serviceDiscoveryChainTxn(tx, ws, serviceName, entMeta, req)
}
func (s *Store) serviceDiscoveryChainTxn(
func serviceDiscoveryChainTxn(
tx ReadTxn,
ws memdb.WatchSet,
serviceName string,
@ -888,7 +1037,7 @@ func (s *Store) serviceDiscoveryChainTxn(
}
req.Entries = entries
_, config, err := s.CAConfig(ws)
_, config, err := caConfigTxn(tx, ws)
if err != nil {
return 0, nil, nil, err
} else if config == nil {
@ -1268,7 +1417,9 @@ func anyKey(m map[structs.ServiceID]struct{}) (structs.ServiceID, bool) {
// getProxyConfigEntryTxn is a convenience method for fetching a
// proxy-defaults kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getProxyConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
@ -1293,7 +1444,9 @@ func getProxyConfigEntryTxn(
// getServiceConfigEntryTxn is a convenience method for fetching a
// service-defaults kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getServiceConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
@ -1318,7 +1471,9 @@ func getServiceConfigEntryTxn(
// getRouterConfigEntryTxn is a convenience method for fetching a
// service-router kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getRouterConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
@ -1343,7 +1498,9 @@ func getRouterConfigEntryTxn(
// getSplitterConfigEntryTxn is a convenience method for fetching a
// service-splitter kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getSplitterConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
@ -1368,7 +1525,9 @@ func getSplitterConfigEntryTxn(
// getResolverConfigEntryTxn is a convenience method for fetching a
// service-resolver kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getResolverConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
@ -1393,7 +1552,9 @@ func getResolverConfigEntryTxn(
// getServiceIntentionsConfigEntryTxn is a convenience method for fetching a
// service-intentions kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getServiceIntentionsConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
@ -1415,6 +1576,32 @@ func getServiceIntentionsConfigEntryTxn(
return idx, ixn, nil
}
// getExportedServicesConfigEntryTxn is a convenience method for fetching an
// exported-services kind of config entry.
//
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getExportedServicesConfigEntryTxn(
tx ReadTxn,
ws memdb.WatchSet,
overrides map[configentry.KindName]structs.ConfigEntry,
entMeta *acl.EnterpriseMeta,
) (uint64, *structs.ExportedServicesConfigEntry, error) {
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ExportedServices, entMeta.PartitionOrDefault(), overrides, entMeta)
if err != nil {
return 0, nil, err
} else if entry == nil {
return idx, nil, nil
}
export, ok := entry.(*structs.ExportedServicesConfigEntry)
if !ok {
return 0, nil, fmt.Errorf("invalid service config type %T", entry)
}
return idx, export, nil
}
func configEntryWithOverridesTxn(
tx ReadTxn,
ws memdb.WatchSet,
@ -1443,12 +1630,12 @@ func protocolForService(
svc structs.ServiceName,
) (uint64, string, error) {
// Get the global proxy defaults (for default protocol)
maxIdx, proxyConfig, err := configEntryTxn(tx, ws, structs.ProxyDefaults, structs.ProxyConfigGlobal, &svc.EnterpriseMeta)
maxIdx, proxyConfig, err := getProxyConfigEntryTxn(tx, ws, structs.ProxyConfigGlobal, nil, &svc.EnterpriseMeta)
if err != nil {
return 0, "", err
}
idx, serviceDefaults, err := configEntryTxn(tx, ws, structs.ServiceDefaults, svc.Name, &svc.EnterpriseMeta)
idx, serviceDefaults, err := getServiceConfigEntryTxn(tx, ws, svc.Name, nil, &svc.EnterpriseMeta)
if err != nil {
return 0, "", err
}
@ -1467,7 +1654,7 @@ func protocolForService(
EvaluateInPartition: svc.PartitionOrDefault(),
EvaluateInDatacenter: "dc1",
// Use a dummy trust domain since that won't affect the protocol here.
EvaluateInTrustDomain: "b6fc9da3-03d4-4b5a-9134-c045e9b20152.consul",
EvaluateInTrustDomain: dummyTrustDomain,
Entries: entries,
}
chain, err := discoverychain.Compile(req)
@ -1477,6 +1664,8 @@ func protocolForService(
return maxIdx, chain.Protocol, nil
}
const dummyTrustDomain = "b6fc9da3-03d4-4b5a-9134-c045e9b20152.consul"
func newConfigEntryQuery(c structs.ConfigEntry) configentry.KindName {
return configentry.NewKindName(c.GetKind(), c.GetName(), c.GetEnterpriseMeta())
}
@ -1498,3 +1687,24 @@ func (q ConfigEntryKindQuery) NamespaceOrDefault() string {
func (q ConfigEntryKindQuery) PartitionOrDefault() string {
return q.EnterpriseMeta.PartitionOrDefault()
}
// convertTargetsToTestSpiffeIDs indexes the provided targets by their eventual
// spiffeid values using a dummy trust domain. Returns a map of SpiffeIDs to
// targetID values which can be used for error output.
func convertTargetsToTestSpiffeIDs(targets map[string]*structs.DiscoveryTarget) map[string]string {
out := make(map[string]string)
for tid, t := range targets {
testSpiffeID := connect.SpiffeIDService{
Host: dummyTrustDomain,
Partition: t.Partition,
Namespace: t.Namespace,
Datacenter: t.Datacenter,
Service: t.Service,
}
uri := testSpiffeID.URI().String()
if _, ok := out[uri]; !ok {
out[uri] = tid
}
}
return out
}
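This index is what makes the L4 peer-export check above cheap: compile the chain with and without the pending overrides, index both target sets by SPIFFE ID, and reject any ID that only exists in the proposed set. A standalone sketch of that set comparison (the maps stand in for convertTargetsToTestSpiffeIDs output):

```go
package main

import "fmt"

// addedTargets returns the target IDs whose SPIFFE IDs appear in the proposed
// chain but not in the current one.
func addedTargets(oldIDs, newIDs map[string]string) []string {
	var added []string
	for id, targetID := range newIDs {
		if _, exists := oldIDs[id]; !exists {
			added = append(added, targetID)
		}
	}
	return added
}

func main() {
	oldIDs := map[string]string{
		"spiffe://dummy.consul/ns/default/dc/dc1/svc/main": "main.default.dc1",
	}
	newIDs := map[string]string{
		"spiffe://dummy.consul/ns/default/dc/dc1/svc/main":  "main.default.dc1",
		"spiffe://dummy.consul/ns/default/dc/dc2/svc/other": "other.default.dc2",
	}
	// "other.default.dc2" would be rejected for a peer-exported TCP chain.
	fmt.Println(addedTargets(oldIDs, newIDs))
}
```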

View File

@ -843,24 +843,75 @@ func TestStore_Service_TerminatingGateway_Kind_Service_Destination_Wildcard(t *t
}
func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
ensureConfigEntry := func(s *Store, idx uint64, entry structs.ConfigEntry) error {
if err := entry.Normalize(); err != nil {
return err
}
if err := entry.Validate(); err != nil {
return err
}
return s.EnsureConfigEntry(0, entry)
}
type tcase struct {
entries []structs.ConfigEntry
op func(t *testing.T, s *Store) error
opAdd structs.ConfigEntry
opDelete configentry.KindName
expectErr string
expectGraphErr bool
}
EMPTY_KN := configentry.KindName{}
run := func(t *testing.T, tc tcase) {
s := testConfigStateStore(t)
for _, entry := range tc.entries {
require.NoError(t, ensureConfigEntry(s, 0, entry))
}
nOps := 0
if tc.opAdd != nil {
nOps++
}
if tc.opDelete != EMPTY_KN {
nOps++
}
require.Equal(t, 1, nOps, "exactly one operation is required")
var err error
switch {
case tc.opAdd != nil:
err = ensureConfigEntry(s, 0, tc.opAdd)
case tc.opDelete != EMPTY_KN:
kn := tc.opDelete
err = s.DeleteConfigEntry(0, kn.Kind, kn.Name, &kn.EnterpriseMeta)
default:
t.Fatal("not possible")
}
if tc.expectErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectErr)
_, ok := err.(*structs.ConfigEntryGraphError)
if tc.expectGraphErr {
require.True(t, ok, "%T is not a *ConfigEntryGraphError", err)
} else {
require.False(t, ok, "did not expect a *ConfigEntryGraphError here: %v", err)
}
} else {
require.NoError(t, err)
}
}
cases := map[string]tcase{
"splitter fails without default protocol": {
entries: []structs.ConfigEntry{},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 100},
},
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 100},
},
},
expectErr: "does not permit advanced routing or splitting behavior",
expectGraphErr: true,
@ -873,15 +924,12 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
Protocol: "tcp",
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 100},
},
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 100},
},
},
expectErr: "does not permit advanced routing or splitting behavior",
expectGraphErr: true,
@ -914,17 +962,14 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 90, ServiceSubset: "v1"},
{Weight: 10, ServiceSubset: "v2"},
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 90, ServiceSubset: "v1"},
{Weight: 10, ServiceSubset: "v2"},
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
},
"splitter works with http protocol (from proxy-defaults)": {
@ -949,16 +994,13 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 90, ServiceSubset: "v1"},
{Weight: 10, ServiceSubset: "v2"},
},
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 90, ServiceSubset: "v1"},
{Weight: 10, ServiceSubset: "v2"},
},
},
},
"router fails with tcp protocol": {
@ -978,24 +1020,21 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceRouterConfigEntry{
Kind: structs.ServiceRouter,
Name: "main",
Routes: []structs.ServiceRoute{
{
Match: &structs.ServiceRouteMatch{
HTTP: &structs.ServiceRouteHTTPMatch{
PathExact: "/other",
},
},
Destination: &structs.ServiceRouteDestination{
ServiceSubset: "other",
opAdd: &structs.ServiceRouterConfigEntry{
Kind: structs.ServiceRouter,
Name: "main",
Routes: []structs.ServiceRoute{
{
Match: &structs.ServiceRouteMatch{
HTTP: &structs.ServiceRouteHTTPMatch{
PathExact: "/other",
},
},
Destination: &structs.ServiceRouteDestination{
ServiceSubset: "other",
},
},
}
return s.EnsureConfigEntry(0, entry)
},
},
expectErr: "does not permit advanced routing or splitting behavior",
expectGraphErr: true,
@ -1012,24 +1051,21 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceRouterConfigEntry{
Kind: structs.ServiceRouter,
Name: "main",
Routes: []structs.ServiceRoute{
{
Match: &structs.ServiceRouteMatch{
HTTP: &structs.ServiceRouteHTTPMatch{
PathExact: "/other",
},
},
Destination: &structs.ServiceRouteDestination{
ServiceSubset: "other",
opAdd: &structs.ServiceRouterConfigEntry{
Kind: structs.ServiceRouter,
Name: "main",
Routes: []structs.ServiceRoute{
{
Match: &structs.ServiceRouteMatch{
HTTP: &structs.ServiceRouteHTTPMatch{
PathExact: "/other",
},
},
Destination: &structs.ServiceRouteDestination{
ServiceSubset: "other",
},
},
}
return s.EnsureConfigEntry(0, entry)
},
},
expectErr: "does not permit advanced routing or splitting behavior",
expectGraphErr: true,
@ -1063,9 +1099,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
return s.DeleteConfigEntry(0, structs.ServiceDefaults, "main", nil)
},
opDelete: configentry.NewKindName(structs.ServiceDefaults, "main", nil),
expectErr: "does not permit advanced routing or splitting behavior",
expectGraphErr: true,
},
@ -1099,9 +1133,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
return s.DeleteConfigEntry(0, structs.ProxyDefaults, structs.ProxyConfigGlobal, nil)
},
opDelete: configentry.NewKindName(structs.ProxyDefaults, structs.ProxyConfigGlobal, nil),
expectErr: "does not permit advanced routing or splitting behavior",
expectGraphErr: true,
},
@ -1140,9 +1172,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
return s.DeleteConfigEntry(0, structs.ProxyDefaults, structs.ProxyConfigGlobal, nil)
},
opDelete: configentry.NewKindName(structs.ProxyDefaults, structs.ProxyConfigGlobal, nil),
},
"cannot change to tcp protocol after splitter created": {
entries: []structs.ConfigEntry{
@ -1172,13 +1202,10 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "main",
Protocol: "tcp",
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "main",
Protocol: "tcp",
},
expectErr: "does not permit advanced routing or splitting behavior",
expectGraphErr: true,
@ -1216,9 +1243,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
return s.DeleteConfigEntry(0, structs.ServiceDefaults, "main", nil)
},
opDelete: configentry.NewKindName(structs.ServiceDefaults, "main", nil),
expectErr: "does not permit advanced routing or splitting behavior",
expectGraphErr: true,
},
@ -1255,13 +1280,10 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "main",
Protocol: "tcp",
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: "main",
Protocol: "tcp",
},
expectErr: "does not permit advanced routing or splitting behavior",
expectGraphErr: true,
@ -1280,16 +1302,13 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
Protocol: "tcp",
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 90},
{Weight: 10, Service: "other"},
},
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceSplitterConfigEntry{
Kind: structs.ServiceSplitter,
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 90},
{Weight: 10, Service: "other"},
},
},
expectErr: "uses inconsistent protocols",
expectGraphErr: true,
@ -1307,24 +1326,21 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
Protocol: "tcp",
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceRouterConfigEntry{
Kind: structs.ServiceRouter,
Name: "main",
Routes: []structs.ServiceRoute{
{
Match: &structs.ServiceRouteMatch{
HTTP: &structs.ServiceRouteHTTPMatch{
PathExact: "/other",
},
},
Destination: &structs.ServiceRouteDestination{
Service: "other",
opAdd: &structs.ServiceRouterConfigEntry{
Kind: structs.ServiceRouter,
Name: "main",
Routes: []structs.ServiceRoute{
{
Match: &structs.ServiceRouteMatch{
HTTP: &structs.ServiceRouteHTTPMatch{
PathExact: "/other",
},
},
Destination: &structs.ServiceRouteDestination{
Service: "other",
},
},
}
return s.EnsureConfigEntry(0, entry)
},
},
expectErr: "uses inconsistent protocols",
expectGraphErr: true,
@ -1348,17 +1364,14 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
ConnectTimeout: 33 * time.Second,
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Service: "other",
},
opAdd: &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Service: "other",
},
}
return s.EnsureConfigEntry(0, entry)
},
},
expectErr: "uses inconsistent protocols",
expectGraphErr: true,
@ -1381,15 +1394,12 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
ConnectTimeout: 33 * time.Second,
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
},
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
},
},
expectErr: "uses inconsistent protocols",
expectGraphErr: true,
@ -1408,16 +1418,13 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
ServiceSubset: "v1",
},
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
ServiceSubset: "v1",
},
},
},
"cannot redirect to a subset that does not exist": {
@ -1428,16 +1435,13 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
ConnectTimeout: 33 * time.Second,
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
ServiceSubset: "v1",
},
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
ServiceSubset: "v1",
},
},
expectErr: `does not have a subset named "v1"`,
expectGraphErr: true,
@ -1453,15 +1457,12 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
},
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
},
},
expectErr: `detected circular resolver redirect`,
expectGraphErr: true,
@ -1483,45 +1484,121 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
},
},
},
op: func(t *testing.T, s *Store) error {
entry := &structs.ServiceSplitterConfigEntry{
Kind: "service-splitter",
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 100, Service: "other"},
},
}
return s.EnsureConfigEntry(0, entry)
opAdd: &structs.ServiceSplitterConfigEntry{
Kind: "service-splitter",
Name: "main",
Splits: []structs.ServiceSplit{
{Weight: 100, Service: "other"},
},
},
expectErr: `detected circular reference`,
expectGraphErr: true,
},
/////////////////////////////////////////////////
"cannot peer export cross-dc redirect": {
entries: []structs.ConfigEntry{
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Datacenter: "dc3",
},
},
},
opAdd: &structs.ExportedServicesConfigEntry{
Name: "default",
Services: []structs.ExportedService{{
Name: "main",
Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
}},
},
expectErr: `contains cross-datacenter resolver redirect`,
},
"cannot peer export cross-dc redirect via wildcard": {
entries: []structs.ConfigEntry{
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Datacenter: "dc3",
},
},
},
opAdd: &structs.ExportedServicesConfigEntry{
Name: "default",
Services: []structs.ExportedService{{
Name: "*",
Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
}},
},
expectErr: `contains cross-datacenter resolver redirect`,
},
"cannot peer export cross-dc failover": {
entries: []structs.ConfigEntry{
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Datacenters: []string{"dc3"},
},
},
},
},
opAdd: &structs.ExportedServicesConfigEntry{
Name: "default",
Services: []structs.ExportedService{{
Name: "main",
Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
}},
},
expectErr: `contains cross-datacenter failover`,
},
"cannot peer export cross-dc failover via wildcard": {
entries: []structs.ConfigEntry{
&structs.ServiceResolverConfigEntry{
Kind: "service-resolver",
Name: "main",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Datacenters: []string{"dc3"},
},
},
},
},
opAdd: &structs.ExportedServicesConfigEntry{
Name: "default",
Services: []structs.ExportedService{{
Name: "*",
Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
}},
},
expectErr: `contains cross-datacenter failover`,
},
"cannot redirect a peer exported tcp service": {
entries: []structs.ConfigEntry{
&structs.ExportedServicesConfigEntry{
Name: "default",
Services: []structs.ExportedService{{
Name: "main",
Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
}},
},
},
opAdd: &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "main",
Redirect: &structs.ServiceResolverRedirect{
Service: "other",
},
},
expectErr: `cannot introduce new discovery chain targets like`,
},
}
for name, tc := range cases {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
s := testConfigStateStore(t)
for _, entry := range tc.entries {
require.NoError(t, entry.Normalize())
require.NoError(t, s.EnsureConfigEntry(0, entry))
}
err := tc.op(t, s)
if tc.expectErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectErr)
_, ok := err.(*structs.ConfigEntryGraphError)
if tc.expectGraphErr {
require.True(t, ok, "%T is not a *ConfigEntryGraphError", err)
} else {
require.False(t, ok, "did not expect a *ConfigEntryGraphError here: %v", err)
}
} else {
require.NoError(t, err)
}
run(t, tc)
})
}
}

View File

@ -10,6 +10,7 @@ import (
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib/maps"
"github.com/hashicorp/consul/proto/pbpeering"
@ -154,10 +155,10 @@ func peeringReadTxn(tx ReadTxn, ws memdb.WatchSet, q Query) (uint64, *pbpeering.
func (s *Store) PeeringList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) {
tx := s.db.ReadTxn()
defer tx.Abort()
return s.peeringListTxn(ws, tx, entMeta)
return peeringListTxn(ws, tx, entMeta)
}
func (s *Store) peeringListTxn(ws memdb.WatchSet, tx ReadTxn, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) {
func peeringListTxn(ws memdb.WatchSet, tx ReadTxn, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) {
var (
iter memdb.ResultIterator
err error
@ -322,21 +323,21 @@ func (s *Store) ExportedServicesForPeer(ws memdb.WatchSet, peerID string, dc str
return 0, &structs.ExportedServiceList{}, nil
}
return s.exportedServicesForPeerTxn(ws, tx, peering, dc)
return exportedServicesForPeerTxn(ws, tx, peering, dc)
}
func (s *Store) ExportedServicesForAllPeersByName(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, map[string]structs.ServiceList, error) {
tx := s.db.ReadTxn()
defer tx.Abort()
maxIdx, peerings, err := s.peeringListTxn(ws, tx, entMeta)
maxIdx, peerings, err := peeringListTxn(ws, tx, entMeta)
if err != nil {
return 0, nil, fmt.Errorf("failed to list peerings: %w", err)
}
out := make(map[string]structs.ServiceList)
for _, peering := range peerings {
idx, list, err := s.exportedServicesForPeerTxn(ws, tx, peering, "")
idx, list, err := exportedServicesForPeerTxn(ws, tx, peering, "")
if err != nil {
return 0, nil, fmt.Errorf("failed to list exported services for peer %q: %w", peering.ID, err)
}
@ -356,34 +357,31 @@ func (s *Store) ExportedServicesForAllPeersByName(ws memdb.WatchSet, entMeta acl
// specific peering, and optionally include information about discovery chain
// reachable targets for these exported services if the "dc" parameter is
// specified.
func (s *Store) exportedServicesForPeerTxn(ws memdb.WatchSet, tx ReadTxn, peering *pbpeering.Peering, dc string) (uint64, *structs.ExportedServiceList, error) {
func exportedServicesForPeerTxn(
ws memdb.WatchSet,
tx ReadTxn,
peering *pbpeering.Peering,
dc string,
) (uint64, *structs.ExportedServiceList, error) {
maxIdx := peering.ModifyIndex
entMeta := structs.NodeEnterpriseMetaInPartition(peering.Partition)
idx, raw, err := configEntryTxn(tx, ws, structs.ExportedServices, entMeta.PartitionOrDefault(), entMeta)
idx, conf, err := getExportedServicesConfigEntryTxn(tx, ws, nil, entMeta)
if err != nil {
return 0, nil, fmt.Errorf("failed to fetch exported-services config entry: %w", err)
}
if idx > maxIdx {
maxIdx = idx
}
if raw == nil {
if conf == nil {
return maxIdx, &structs.ExportedServiceList{}, nil
}
conf, ok := raw.(*structs.ExportedServicesConfigEntry)
if !ok {
return 0, nil, fmt.Errorf("expected type *structs.ExportedServicesConfigEntry, got %T", raw)
}
var (
normalSet = make(map[structs.ServiceName]struct{})
discoSet = make(map[structs.ServiceName]struct{})
)
// TODO(peering): filter the disco chain portion of the results to include
// only names reachable over the mesh, to avoid replicating clutter.
//
// At least one of the following should be true for a name for it to
// replicate:
//
@ -426,7 +424,7 @@ func (s *Store) exportedServicesForPeerTxn(ws memdb.WatchSet, tx ReadTxn, peerin
}
// list all config entries of kind service-resolver, service-router, service-splitter?
idx, discoChains, err := listDiscoveryChainNamesTxn(tx, ws, svcMeta)
idx, discoChains, err := listDiscoveryChainNamesTxn(tx, ws, nil, svcMeta)
if err != nil {
return 0, nil, fmt.Errorf("failed to get discovery chain names: %w", err)
}
@ -463,7 +461,7 @@ func (s *Store) exportedServicesForPeerTxn(ws memdb.WatchSet, tx ReadTxn, peerin
if dc != "" && !structs.IsProtocolHTTPLike(protocol) {
// We only need to populate the targets for replication purposes for L4 protocols, which
// do not ultimately get intercepted by the mesh gateways.
idx, targets, err := s.discoveryChainOriginalTargetsTxn(tx, ws, dc, svc.Name, &svc.EnterpriseMeta)
idx, targets, err := discoveryChainOriginalTargetsTxn(tx, ws, dc, svc.Name, &svc.EnterpriseMeta)
if err != nil {
return fmt.Errorf("failed to get discovery chain targets for service %q: %w", svc, err)
}
@ -504,6 +502,86 @@ func (s *Store) exportedServicesForPeerTxn(ws memdb.WatchSet, tx ReadTxn, peerin
return maxIdx, list, nil
}
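// listAllExportedServices returns the set of all service names exported to at
// least one peer in the given partition, honoring any config entry overrides
// supplied by the caller.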
func listAllExportedServices(
ws memdb.WatchSet,
tx ReadTxn,
overrides map[configentry.KindName]structs.ConfigEntry,
entMeta acl.EnterpriseMeta,
) (uint64, map[structs.ServiceName]struct{}, error) {
idx, export, err := getExportedServicesConfigEntryTxn(tx, ws, overrides, &entMeta)
if err != nil {
return 0, nil, err
}
found := make(map[structs.ServiceName]struct{})
if export == nil {
return idx, found, nil
}
_, services, err := listServicesExportedToAnyPeerByConfigEntry(ws, tx, export, overrides)
if err != nil {
return 0, nil, err
}
for _, svc := range services {
found[svc] = struct{}{}
}
return idx, found, nil
}
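// listServicesExportedToAnyPeerByConfigEntry evaluates a single
// exported-services config entry and returns the sorted list of service names
// it exports to at least one peer, expanding wildcard exports into the
// concrete discovery chain names they cover.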
func listServicesExportedToAnyPeerByConfigEntry(
ws memdb.WatchSet,
tx ReadTxn,
conf *structs.ExportedServicesConfigEntry,
overrides map[configentry.KindName]structs.ConfigEntry,
) (uint64, []structs.ServiceName, error) {
var (
entMeta = conf.GetEnterpriseMeta()
found = make(map[structs.ServiceName]struct{})
maxIdx uint64
)
for _, svc := range conf.Services {
svcMeta := acl.NewEnterpriseMetaWithPartition(entMeta.PartitionOrDefault(), svc.Namespace)
sawPeer := false
for _, consumer := range svc.Consumers {
if consumer.PeerName == "" {
continue
}
sawPeer = true
sn := structs.NewServiceName(svc.Name, &svcMeta)
if _, ok := found[sn]; ok {
continue
}
if svc.Name != structs.WildcardSpecifier {
found[sn] = struct{}{}
}
}
if sawPeer && svc.Name == structs.WildcardSpecifier {
idx, discoChains, err := listDiscoveryChainNamesTxn(tx, ws, overrides, svcMeta)
if err != nil {
return 0, nil, fmt.Errorf("failed to get discovery chain names: %w", err)
}
if idx > maxIdx {
maxIdx = idx
}
for _, sn := range discoChains {
found[sn] = struct{}{}
}
}
}
foundKeys := maps.SliceOfKeys(found)
structs.ServiceList(foundKeys).Sort()
return maxIdx, foundKeys, nil
}
// PeeringsForService returns the list of peerings that are associated with the service name provided in the query.
// This is used to configure connect proxies for a given service. The result is generated by querying for exported
// service config entries and filtering for those that match the given service.

View File

@ -5,7 +5,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc/private/services/subscribe"
"github.com/hashicorp/consul/agent/grpc-internal/services/subscribe"
"github.com/hashicorp/consul/agent/structs"
)

View File

@ -14,8 +14,8 @@ import (
"golang.org/x/sync/errgroup"
gogrpc "google.golang.org/grpc"
grpc "github.com/hashicorp/consul/agent/grpc/private"
"github.com/hashicorp/consul/agent/grpc/private/resolver"
grpc "github.com/hashicorp/consul/agent/grpc-internal"
"github.com/hashicorp/consul/agent/grpc-internal/resolver"
"github.com/hashicorp/consul/agent/router"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbservice"

View File

@ -676,15 +676,34 @@ func (e ecsNotGlobalError) Unwrap() error {
return e.error
}
type queryLocality struct {
// datacenter is the datacenter parsed from a label that has an explicit datacenter part.
// Example query: <service>.virtual.<namespace>.ns.<partition>.ap.<datacenter>.dc.consul
datacenter string
// peerOrDatacenter is parsed from DNS queries where the datacenter and peer name are specified in the same query part.
// Example query: <service>.virtual.<peerOrDatacenter>.consul
peerOrDatacenter string
acl.EnterpriseMeta
}
func (l queryLocality) effectiveDatacenter(defaultDC string) string {
// Prefer the value parsed from a query with explicit parts: <namespace>.ns.<partition>.ap.<datacenter>.dc
if l.datacenter != "" {
return l.datacenter
}
// Fall back to the ambiguously parsed DC or Peer.
if l.peerOrDatacenter != "" {
return l.peerOrDatacenter
}
// If all are empty, use a default value.
return defaultDC
}
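// A hedged sketch (illustration only, not part of this change) of the
// precedence implemented above, assuming code running inside package agent:
func exampleEffectiveDatacenter() {
	loc := queryLocality{datacenter: "dc2", peerOrDatacenter: "peer1"}
	_ = loc.effectiveDatacenter("dc1") // "dc2": the explicit datacenter wins

	loc = queryLocality{peerOrDatacenter: "dc3"}
	_ = loc.effectiveDatacenter("dc1") // "dc3": ambiguous label used as the DC

	_ = queryLocality{}.effectiveDatacenter("dc1") // "dc1": agent default
}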
// dispatch is used to parse a request and invoke the correct handler.
// The maxRecursionLevel parameter controls whether recursive calls may be performed.
func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursionLevel int) error {
// By default the query is in the default datacenter
datacenter := d.agent.config.Datacenter
// Dereference to clone the agent's default enterprise meta so we don't modify it.
var entMeta = d.defaultEnterpriseMeta
// Choose correct response domain
respDomain := d.getResponseDomain(req.Question[0].Name)
@ -733,16 +752,17 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
return invalid()
}
if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) {
locality, ok := d.parseLocality(querySuffixes, cfg)
if !ok {
return invalid()
}
lookup := serviceLookup{
Datacenter: datacenter,
Datacenter: locality.effectiveDatacenter(d.agent.config.Datacenter),
Connect: false,
Ingress: false,
MaxRecursionLevel: maxRecursionLevel,
EnterpriseMeta: entMeta,
EnterpriseMeta: locality.EnterpriseMeta,
}
// Support RFC 2782 style syntax
if n == 2 && strings.HasPrefix(queryParts[1], "_") && strings.HasPrefix(queryParts[0], "_") {
@ -779,17 +799,18 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
return invalid()
}
if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) {
locality, ok := d.parseLocality(querySuffixes, cfg)
if !ok {
return invalid()
}
lookup := serviceLookup{
Datacenter: datacenter,
Datacenter: locality.effectiveDatacenter(d.agent.config.Datacenter),
Service: queryParts[len(queryParts)-1],
Connect: true,
Ingress: false,
MaxRecursionLevel: maxRecursionLevel,
EnterpriseMeta: entMeta,
EnterpriseMeta: locality.EnterpriseMeta,
}
// name.connect.consul
return d.serviceLookup(cfg, lookup, req, resp)
@ -799,14 +820,18 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
return invalid()
}
if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) {
locality, ok := d.parseLocality(querySuffixes, cfg)
if !ok {
return invalid()
}
args := structs.ServiceSpecificRequest{
Datacenter: datacenter,
// The datacenter of the request is not specified because cross-datacenter virtual IP
// queries are not supported. This guard rail is in place because virtual IPs are allocated
// within a datacenter, so their uniqueness is not guaranteed globally.
PeerName: locality.peerOrDatacenter,
ServiceName: queryParts[len(queryParts)-1],
EnterpriseMeta: entMeta,
EnterpriseMeta: locality.EnterpriseMeta,
QueryOptions: structs.QueryOptions{
Token: d.agent.tokens.UserToken(),
},
@ -834,17 +859,18 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
return invalid()
}
if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) {
locality, ok := d.parseLocality(querySuffixes, cfg)
if !ok {
return invalid()
}
lookup := serviceLookup{
Datacenter: datacenter,
Datacenter: locality.effectiveDatacenter(d.agent.config.Datacenter),
Service: queryParts[len(queryParts)-1],
Connect: false,
Ingress: true,
MaxRecursionLevel: maxRecursionLevel,
EnterpriseMeta: entMeta,
EnterpriseMeta: locality.EnterpriseMeta,
}
// name.ingress.consul
return d.serviceLookup(cfg, lookup, req, resp)
@ -854,13 +880,14 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
return invalid()
}
if !d.parseDatacenterAndEnterpriseMeta(querySuffixes, cfg, &datacenter, &entMeta) {
locality, ok := d.parseLocality(querySuffixes, cfg)
if !ok {
return invalid()
}
// Namespace should not be set for node queries
ns := entMeta.NamespaceOrEmpty()
if ns != "" && ns != acl.DefaultNamespaceName {
// Nodes are only registered in the default namespace so queries
// must not specify a non-default namespace.
if !locality.InDefaultNamespace() {
return invalid()
}
@ -868,15 +895,17 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
node := strings.Join(queryParts, ".")
lookup := nodeLookup{
Datacenter: datacenter,
Datacenter: locality.effectiveDatacenter(d.agent.config.Datacenter),
Node: node,
MaxRecursionLevel: maxRecursionLevel,
EnterpriseMeta: entMeta,
EnterpriseMeta: locality.EnterpriseMeta,
}
return d.nodeLookup(cfg, lookup, req, resp)
case "query":
datacenter := d.agent.config.Datacenter
// ensure we have a query name
if len(queryParts) < 1 {
return invalid()
@ -905,7 +934,7 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
if err != nil {
return invalid()
}
//check if the query type is A for IPv4 or ANY
// check if the query type is A for IPv4 or ANY
aRecord := &dns.A{
Hdr: dns.RR_Header{
Name: qName + respDomain,
@ -926,7 +955,7 @@ func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursi
if err != nil {
return invalid()
}
//check if the query type is AAAA for IPv6 or ANY
// check if the query type is AAAA for IPv6 or ANY
aaaaRecord := &dns.AAAA{
Hdr: dns.RR_Header{
Name: qName + respDomain,

View File

@ -16,15 +16,19 @@ func getEnterpriseDNSConfig(conf *config.RuntimeConfig) enterpriseDNSConfig {
return enterpriseDNSConfig{}
}
func (d *DNSServer) parseDatacenterAndEnterpriseMeta(labels []string, _ *dnsConfig, datacenter *string, _ *acl.EnterpriseMeta) bool {
// parseLocality can parse peer name or datacenter from a DNS query's labels.
// Peer name is parsed from the same query part that datacenter is, so given this ambiguity
// we parse a "peerOrDatacenter". The caller or RPC handler are responsible for disambiguating.
func (d *DNSServer) parseLocality(labels []string, cfg *dnsConfig) (queryLocality, bool) {
switch len(labels) {
case 1:
*datacenter = labels[0]
return true
return queryLocality{peerOrDatacenter: labels[0]}, true
case 0:
return true
return queryLocality{}, true
}
return false
return queryLocality{}, false
}
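// A hedged sketch (illustration only, not part of this change) of how this
// implementation treats label counts; d and cfg are assumed to be in scope.
func exampleParseLocality(d *DNSServer, cfg *dnsConfig) {
	loc, ok := d.parseLocality([]string{"dc2"}, cfg)
	// ok == true; loc.peerOrDatacenter == "dc2". Whether "dc2" is a peer or a
	// datacenter is disambiguated later by the caller or RPC handler.

	_, ok = d.parseLocality(nil, cfg)
	// ok == true; the zero locality means agent defaults apply.

	_, ok = d.parseLocality([]string{"ns1", "ns", "dc2", "dc"}, cfg)
	// ok == false here: multi-label locality syntax is not parsed by this
	// implementation (presumably handled in the enterprise variant).

	_, _ = loc, ok
}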
func serviceCanonicalDNSName(name, kind, datacenter, domain string, _ *acl.EnterpriseMeta) string {

View File

@ -11,6 +11,7 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/serf/coordinate"
"github.com/miekg/dns"
"github.com/stretchr/testify/require"
@ -458,7 +459,7 @@ func TestDNSCycleRecursorCheck(t *testing.T) {
},
})
defer server2.Shutdown()
//Mock the agent startup with the necessary configs
// Mock the agent startup with the necessary configs
agent := NewTestAgent(t,
`recursors = ["`+server1.Addr+`", "`+server2.Addr+`"]
`)
@ -496,7 +497,7 @@ func TestDNSCycleRecursorCheckAllFail(t *testing.T) {
MsgHdr: dns.MsgHdr{Rcode: dns.RcodeRefused},
})
defer server3.Shutdown()
//Mock the agent startup with the necessary configs
// Mock the agent startup with the necessary configs
agent := NewTestAgent(t,
`recursors = ["`+server1.Addr+`", "`+server2.Addr+`","`+server3.Addr+`"]
`)
@ -507,7 +508,7 @@ func TestDNSCycleRecursorCheckAllFail(t *testing.T) {
// Agent request
client := new(dns.Client)
in, _, _ := client.Exchange(m, agent.DNSAddr())
//Verify if we hit SERVFAIL from Consul
// Verify if we hit SERVFAIL from Consul
require.Equal(t, dns.RcodeServerFailure, in.Rcode)
}
func TestDNS_NodeLookup_CNAME(t *testing.T) {
@ -1756,14 +1757,42 @@ func TestDNS_ConnectServiceLookup(t *testing.T) {
require.Equal(t, uint32(0), srvRec.Hdr.Ttl)
require.Equal(t, "127.0.0.55", cnameRec.A.String())
}
}
// Look up the virtual IP of the proxy.
questions = []string{
"db.virtual.consul.",
func TestDNS_VirtualIPLookup(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
for _, question := range questions {
t.Parallel()
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
server, ok := a.delegate.(*consul.Server)
require.True(t, ok)
// The proxy service will not receive a virtual IP if the server is not assigning virtual IPs yet.
retry.Run(t, func(r *retry.R) {
_, entry, err := server.FSM().State().SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled)
require.NoError(r, err)
require.NotNil(r, entry)
})
type testCase struct {
name string
reg *structs.RegisterRequest
question string
expect string
}
run := func(t *testing.T, tc testCase) {
var out struct{}
require.NoError(t, a.RPC("Catalog.Register", tc.reg, &out))
m := new(dns.Msg)
m.SetQuestion(question, dns.TypeA)
m.SetQuestion(tc.question, dns.TypeA)
c := new(dns.Client)
in, _, err := c.Exchange(m, a.DNSAddr())
@ -1772,7 +1801,54 @@ func TestDNS_ConnectServiceLookup(t *testing.T) {
aRec, ok := in.Answer[0].(*dns.A)
require.True(t, ok)
require.Equal(t, "240.0.0.1", aRec.A.String())
require.Equal(t, tc.expect, aRec.A.String())
}
tt := []testCase{
{
name: "local query",
reg: &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.55",
Service: &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
Service: "web-proxy",
Port: 12345,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "db",
},
},
},
question: "db.virtual.consul.",
expect: "240.0.0.1",
},
{
name: "query for imported service",
reg: &structs.RegisterRequest{
PeerName: "frontend",
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.55",
Service: &structs.NodeService{
PeerName: "frontend",
Kind: structs.ServiceKindConnectProxy,
Service: "web-proxy",
Port: 12345,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "db",
},
},
},
question: "db.virtual.frontend.consul.",
expect: "240.0.0.2",
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
run(t, tc)
})
}
}

View File

@ -1,4 +1,4 @@
package public
package external
import (
"context"

View File

@ -1,4 +1,4 @@
package public
package external
import (
middleware "github.com/grpc-ecosystem/go-grpc-middleware"
@ -6,11 +6,11 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
agentmiddleware "github.com/hashicorp/consul/agent/grpc/middleware"
agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware"
"github.com/hashicorp/consul/tlsutil"
)
// NewServer constructs a gRPC server for the public gRPC port, to which
// NewServer constructs a gRPC server for the external gRPC port, to which
// handlers can be registered.
func NewServer(logger agentmiddleware.Logger, tls *tlsutil.Configurator) *grpc.Server {
recoveryOpts := agentmiddleware.PanicHandlerMiddlewareOpts(logger)

View File

@ -10,14 +10,14 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/auth"
"github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/proto-public/pbacl"
)
// Login exchanges the presented bearer token for a Consul ACL token using a
// configured auth method.
func (s *Server) Login(ctx context.Context, req *pbacl.LoginRequest) (*pbacl.LoginResponse, error) {
logger := s.Logger.Named("login").With("request_id", public.TraceID())
logger := s.Logger.Named("login").With("request_id", external.TraceID())
logger.Trace("request received")
if err := s.requireACLsEnabled(logger); err != nil {

View File

@ -16,7 +16,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/authmethod"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
structs "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbacl"
)

View File

@ -10,13 +10,13 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/auth"
"github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/proto-public/pbacl"
)
// Logout destroys the given ACL token once the caller is done with it.
func (s *Server) Logout(ctx context.Context, req *pbacl.LogoutRequest) (*pbacl.LogoutResponse, error) {
logger := s.Logger.Named("logout").With("request_id", public.TraceID())
logger := s.Logger.Named("logout").With("request_id", external.TraceID())
logger.Trace("request received")
if err := s.requireACLsEnabled(logger); err != nil {

View File

@ -15,7 +15,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/auth"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbacl"
)

View File

@ -9,7 +9,7 @@ import (
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
structs "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbconnectca"
)

View File

@ -10,7 +10,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbconnectca"
)
@ -22,10 +22,10 @@ func (s *Server) Sign(ctx context.Context, req *pbconnectca.SignRequest) (*pbcon
return nil, err
}
logger := s.Logger.Named("sign").With("request_id", public.TraceID())
logger := s.Logger.Named("sign").With("request_id", external.TraceID())
logger.Trace("request received")
token := public.TokenFromContext(ctx)
token := external.TokenFromContext(ctx)
if req.Csr == "" {
return nil, status.Error(codes.InvalidArgument, "CSR is required")
@ -48,7 +48,7 @@ func (s *Server) Sign(ctx context.Context, req *pbconnectca.SignRequest) (*pbcon
var rsp *pbconnectca.SignResponse
handled, err := s.ForwardRPC(&rpcInfo, func(conn *grpc.ClientConn) error {
logger.Trace("forwarding RPC")
ctx := public.ForwardMetadataContext(ctx)
ctx := external.ForwardMetadataContext(ctx)
var err error
rsp, err = pbconnectca.NewConnectCAServiceClient(conn).Sign(ctx, req)
return err

View File

@ -16,7 +16,7 @@ import (
acl "github.com/hashicorp/consul/acl"
resolver "github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbconnectca"
)

View File

@ -15,7 +15,7 @@ import (
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbconnectca"
)
@ -28,11 +28,11 @@ func (s *Server) WatchRoots(_ *pbconnectca.WatchRootsRequest, serverStream pbcon
return err
}
logger := s.Logger.Named("watch-roots").With("request_id", public.TraceID())
logger := s.Logger.Named("watch-roots").With("request_id", external.TraceID())
logger.Trace("starting stream")
defer logger.Trace("stream closed")
token := public.TokenFromContext(serverStream.Context())
token := external.TokenFromContext(serverStream.Context())
// Serve the roots from an EventPublisher subscription. If the subscription is
// closed due to an ACL change, we'll attempt to re-authorize and resume it to

View File

@ -17,8 +17,8 @@ import (
"github.com/hashicorp/consul/acl"
resolver "github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/grpc/public"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbconnectca"
"github.com/hashicorp/consul/sdk/testutil"
@ -56,7 +56,7 @@ func TestWatchRoots_Success(t *testing.T) {
aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
Return(testutils.TestAuthorizerServiceWriteAny(t), nil)
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
server := NewServer(Config{
Publisher: publisher,
@ -104,7 +104,7 @@ func TestWatchRoots_InvalidACLToken(t *testing.T) {
aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything).
Return(resolver.Result{}, acl.ErrNotFound)
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
server := NewServer(Config{
Publisher: publisher,
@ -142,7 +142,7 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) {
aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
Return(testutils.TestAuthorizerServiceWriteAny(t), nil).Twice()
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
server := NewServer(Config{
Publisher: publisher,
@ -210,7 +210,7 @@ func TestWatchRoots_StateStoreAbandoned(t *testing.T) {
aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
Return(testutils.TestAuthorizerServiceWriteAny(t), nil)
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
server := NewServer(Config{
Publisher: publisher,

View File

@ -13,8 +13,8 @@ import (
acl "github.com/hashicorp/consul/acl"
resolver "github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/grpc/public"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
structs "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbdataplane"
"github.com/hashicorp/consul/types"
@ -78,7 +78,7 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) {
aclResolver := &MockACLResolver{}
aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything).
Return(testutils.TestAuthorizerServiceRead(t, tc.registerReq.Service.ID), nil)
ctx := public.ContextWithToken(context.Background(), testToken)
ctx := external.ContextWithToken(context.Background(), testToken)
server := NewServer(Config{
GetStore: func() StateStore { return store },
@ -148,7 +148,7 @@ func TestGetEnvoyBootstrapParams_Error(t *testing.T) {
aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything).
Return(testutils.TestAuthorizerServiceRead(t, proxyServiceID), nil)
ctx := public.ContextWithToken(context.Background(), testToken)
ctx := external.ContextWithToken(context.Background(), testToken)
store := testutils.TestStateStore(t, nil)
registerReq := testRegisterRequestProxy(t)
@ -218,7 +218,7 @@ func TestGetEnvoyBootstrapParams_Unauthenticated(t *testing.T) {
aclResolver := &MockACLResolver{}
aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything).
Return(resolver.Result{}, acl.ErrNotFound)
ctx := public.ContextWithToken(context.Background(), testToken)
ctx := external.ContextWithToken(context.Background(), testToken)
store := testutils.TestStateStore(t, nil)
server := NewServer(Config{
GetStore: func() StateStore { return store },
@ -237,7 +237,7 @@ func TestGetEnvoyBootstrapParams_PermissionDenied(t *testing.T) {
aclResolver := &MockACLResolver{}
aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything).
Return(testutils.TestAuthorizerDenyAll(t), nil)
ctx := public.ContextWithToken(context.Background(), testToken)
ctx := external.ContextWithToken(context.Background(), testToken)
store := testutils.TestStateStore(t, nil)
registerReq := structs.TestRegisterRequestProxy(t)
proxyServiceID := "web-sidecar-proxy"

View File

@ -11,18 +11,18 @@ import (
acl "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
structs "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbdataplane"
)
func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.GetEnvoyBootstrapParamsRequest) (*pbdataplane.GetEnvoyBootstrapParamsResponse, error) {
logger := s.Logger.Named("get-envoy-bootstrap-params").With("service_id", req.GetServiceId(), "request_id", public.TraceID())
logger := s.Logger.Named("get-envoy-bootstrap-params").With("service_id", req.GetServiceId(), "request_id", external.TraceID())
logger.Trace("Started processing request")
defer logger.Trace("Finished processing request")
token := public.TokenFromContext(ctx)
token := external.TokenFromContext(ctx)
var authzContext acl.AuthorizerContext
entMeta := acl.NewEnterpriseMetaWithPartition(req.GetPartition(), req.GetNamespace())
authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, &entMeta, &authzContext)

View File

@ -7,19 +7,19 @@ import (
"google.golang.org/grpc/status"
acl "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
structs "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbdataplane"
)
func (s *Server) GetSupportedDataplaneFeatures(ctx context.Context, req *pbdataplane.GetSupportedDataplaneFeaturesRequest) (*pbdataplane.GetSupportedDataplaneFeaturesResponse, error) {
logger := s.Logger.Named("get-supported-dataplane-features").With("request_id", public.TraceID())
logger := s.Logger.Named("get-supported-dataplane-features").With("request_id", external.TraceID())
logger.Trace("Started processing request")
defer logger.Trace("Finished processing request")
// Require the given ACL token to have `service:write` on any service
token := public.TokenFromContext(ctx)
token := external.TokenFromContext(ctx)
var authzContext acl.AuthorizerContext
entMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier)
authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, entMeta, &authzContext)

View File

@ -12,8 +12,8 @@ import (
"github.com/hashicorp/consul/acl"
resolver "github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/grpc/public"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/proto-public/pbdataplane"
)
@ -24,7 +24,7 @@ func TestSupportedDataplaneFeatures_Success(t *testing.T) {
aclResolver := &MockACLResolver{}
aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
Return(testutils.TestAuthorizerServiceWriteAny(t), nil)
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
server := NewServer(Config{
Logger: hclog.NewNullLogger(),
ACLResolver: aclResolver,
@ -53,7 +53,7 @@ func TestSupportedDataplaneFeatures_Unauthenticated(t *testing.T) {
aclResolver := &MockACLResolver{}
aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything).
Return(resolver.Result{}, acl.ErrNotFound)
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
server := NewServer(Config{
Logger: hclog.NewNullLogger(),
ACLResolver: aclResolver,
@ -70,7 +70,7 @@ func TestSupportedDataplaneFeatures_PermissionDenied(t *testing.T) {
aclResolver := &MockACLResolver{}
aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
Return(testutils.TestAuthorizerDenyAll(t), nil)
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
server := NewServer(Config{
Logger: hclog.NewNullLogger(),
ACLResolver: aclResolver,

View File

@ -4,7 +4,7 @@ import (
"context"
"testing"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/proto-public/pbdataplane"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"github.com/hashicorp/consul/agent/structs"

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"testing"

View File

@ -0,0 +1,48 @@
// Code generated by mockery v2.12.2. DO NOT EDIT.
package peerstream
import (
acl "github.com/hashicorp/consul/acl"
mock "github.com/stretchr/testify/mock"
resolver "github.com/hashicorp/consul/acl/resolver"
testing "testing"
)
// MockACLResolver is an autogenerated mock type for the ACLResolver type
type MockACLResolver struct {
mock.Mock
}
// ResolveTokenAndDefaultMeta provides a mock function with given fields: _a0, _a1, _a2
func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *acl.EnterpriseMeta, _a2 *acl.AuthorizerContext) (resolver.Result, error) {
ret := _m.Called(_a0, _a1, _a2)
var r0 resolver.Result
if rf, ok := ret.Get(0).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) resolver.Result); ok {
r0 = rf(_a0, _a1, _a2)
} else {
r0 = ret.Get(0).(resolver.Result)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewMockACLResolver creates a new instance of MockACLResolver. It also registers the testing.TB interface on the mock and a cleanup function to assert the mock's expectations.
func NewMockACLResolver(t testing.TB) *MockACLResolver {
mock := &MockACLResolver{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
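
For context, a hedged usage sketch of the generated mock, mirroring the testify-style call sites elsewhere in this commit. It assumes the `mock` (testify) and grpc-external `testutils` imports used in those tests; the test name itself is hypothetical:

```go
func TestExample_UsesMockACLResolver(t *testing.T) {
	aclResolver := NewMockACLResolver(t)
	aclResolver.On("ResolveTokenAndDefaultMeta", "test-token", mock.Anything, mock.Anything).
		Return(testutils.TestAuthorizerServiceWriteAny(t), nil)

	// ... exercise the server under test with "test-token" ...

	// The t.Cleanup registered by NewMockACLResolver asserts these
	// expectations automatically when the test finishes.
}
```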

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"errors"
@ -7,7 +7,6 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-hclog"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/protobuf/types/known/anypb"
@ -15,8 +14,10 @@ import (
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
"github.com/hashicorp/consul/proto/pbservice"
"github.com/hashicorp/consul/proto/pbstatus"
"github.com/hashicorp/consul/types"
)
/*
@ -37,12 +38,10 @@ import (
func makeServiceResponse(
logger hclog.Logger,
update cache.UpdateEvent,
) *pbpeering.ReplicationMessage {
) (*pbpeerstream.ReplicationMessage_Response, error) {
any, csn, err := marshalToProtoAny[*pbservice.IndexedCheckServiceNodes](update.Result)
if err != nil {
// Log the error and skip this response to avoid locking up peering due to a bad update event.
logger.Error("failed to marshal", "error", err)
return nil
return nil, fmt.Errorf("failed to marshal: %w", err)
}
serviceName := strings.TrimPrefix(update.CorrelationID, subExportedService)
@ -55,60 +54,43 @@ func makeServiceResponse(
// We don't distinguish when these three things occurred, but it's safe to send a DELETE Op in all cases, so we do that.
// Case #1 is a no-op for the importing peer.
if len(csn.Nodes) == 0 {
resp := &pbpeering.ReplicationMessage{
Payload: &pbpeering.ReplicationMessage_Response_{
Response: &pbpeering.ReplicationMessage_Response{
ResourceURL: pbpeering.TypeURLService,
// TODO(peering): Nonce management
Nonce: "",
ResourceID: serviceName,
Operation: pbpeering.ReplicationMessage_Response_DELETE,
},
},
}
return resp
return &pbpeerstream.ReplicationMessage_Response{
ResourceURL: pbpeerstream.TypeURLService,
// TODO(peering): Nonce management
Nonce: "",
ResourceID: serviceName,
Operation: pbpeerstream.Operation_OPERATION_DELETE,
}, nil
}
// If there are nodes in the response, we push them as an UPSERT operation.
resp := &pbpeering.ReplicationMessage{
Payload: &pbpeering.ReplicationMessage_Response_{
Response: &pbpeering.ReplicationMessage_Response{
ResourceURL: pbpeering.TypeURLService,
// TODO(peering): Nonce management
Nonce: "",
ResourceID: serviceName,
Operation: pbpeering.ReplicationMessage_Response_UPSERT,
Resource: any,
},
},
}
return resp
return &pbpeerstream.ReplicationMessage_Response{
ResourceURL: pbpeerstream.TypeURLService,
// TODO(peering): Nonce management
Nonce: "",
ResourceID: serviceName,
Operation: pbpeerstream.Operation_OPERATION_UPSERT,
Resource: any,
}, nil
}
func makeCARootsResponse(
logger hclog.Logger,
update cache.UpdateEvent,
) *pbpeering.ReplicationMessage {
) (*pbpeerstream.ReplicationMessage_Response, error) {
any, _, err := marshalToProtoAny[*pbpeering.PeeringTrustBundle](update.Result)
if err != nil {
// Log the error and skip this response to avoid locking up peering due to a bad update event.
logger.Error("failed to marshal", "error", err)
return nil
return nil, fmt.Errorf("failed to marshal: %w", err)
}
resp := &pbpeering.ReplicationMessage{
Payload: &pbpeering.ReplicationMessage_Response_{
Response: &pbpeering.ReplicationMessage_Response{
ResourceURL: pbpeering.TypeURLRoots,
// TODO(peering): Nonce management
Nonce: "",
ResourceID: "roots",
Operation: pbpeering.ReplicationMessage_Response_UPSERT,
Resource: any,
},
},
}
return resp
return &pbpeerstream.ReplicationMessage_Response{
ResourceURL: pbpeerstream.TypeURLRoots,
// TODO(peering): Nonce management
Nonce: "",
ResourceID: "roots",
Operation: pbpeerstream.Operation_OPERATION_UPSERT,
Resource: any,
}, nil
}
// marshalToProtoAny takes any input and returns:
@ -128,14 +110,14 @@ func marshalToProtoAny[T proto.Message](in any) (*anypb.Any, T, error) {
return any, typ, nil
}
func (s *Service) processResponse(
func (s *Server) processResponse(
peerName string,
partition string,
resp *pbpeering.ReplicationMessage_Response,
) (*pbpeering.ReplicationMessage, error) {
if !pbpeering.KnownTypeURL(resp.ResourceURL) {
resp *pbpeerstream.ReplicationMessage_Response,
) (*pbpeerstream.ReplicationMessage, error) {
if !pbpeerstream.KnownTypeURL(resp.ResourceURL) {
err := fmt.Errorf("received response for unknown resource type %q", resp.ResourceURL)
return makeReply(
return makeNACKReply(
resp.ResourceURL,
resp.Nonce,
code.Code_INVALID_ARGUMENT,
@ -144,10 +126,10 @@ func (s *Service) processResponse(
}
switch resp.Operation {
case pbpeering.ReplicationMessage_Response_UPSERT:
case pbpeerstream.Operation_OPERATION_UPSERT:
if resp.Resource == nil {
err := fmt.Errorf("received upsert response with no content")
return makeReply(
return makeNACKReply(
resp.ResourceURL,
resp.Nonce,
code.Code_INVALID_ARGUMENT,
@ -156,7 +138,7 @@ func (s *Service) processResponse(
}
if err := s.handleUpsert(peerName, partition, resp.ResourceURL, resp.ResourceID, resp.Resource); err != nil {
return makeReply(
return makeNACKReply(
resp.ResourceURL,
resp.Nonce,
code.Code_INTERNAL,
@ -164,27 +146,27 @@ func (s *Service) processResponse(
), fmt.Errorf("upsert error: %w", err)
}
return makeReply(resp.ResourceURL, resp.Nonce, code.Code_OK, ""), nil
return makeACKReply(resp.ResourceURL, resp.Nonce), nil
case pbpeering.ReplicationMessage_Response_DELETE:
case pbpeerstream.Operation_OPERATION_DELETE:
if err := s.handleDelete(peerName, partition, resp.ResourceURL, resp.ResourceID); err != nil {
return makeReply(
return makeNACKReply(
resp.ResourceURL,
resp.Nonce,
code.Code_INTERNAL,
fmt.Sprintf("delete error, ResourceURL: %q, ResourceID: %q: %v", resp.ResourceURL, resp.ResourceID, err),
), fmt.Errorf("delete error: %w", err)
}
return makeReply(resp.ResourceURL, resp.Nonce, code.Code_OK, ""), nil
return makeACKReply(resp.ResourceURL, resp.Nonce), nil
default:
var errMsg string
if op := pbpeering.ReplicationMessage_Response_Operation_name[int32(resp.Operation)]; op != "" {
if op := pbpeerstream.Operation_name[int32(resp.Operation)]; op != "" {
errMsg = fmt.Sprintf("unsupported operation: %q", op)
} else {
errMsg = fmt.Sprintf("unsupported operation: %d", resp.Operation)
}
return makeReply(
return makeNACKReply(
resp.ResourceURL,
resp.Nonce,
code.Code_INVALID_ARGUMENT,
@ -193,7 +175,7 @@ func (s *Service) processResponse(
}
}
func (s *Service) handleUpsert(
func (s *Server) handleUpsert(
peerName string,
partition string,
resourceURL string,
@ -201,7 +183,7 @@ func (s *Service) handleUpsert(
resource *anypb.Any,
) error {
switch resourceURL {
case pbpeering.TypeURLService:
case pbpeerstream.TypeURLService:
sn := structs.ServiceNameFromString(resourceID)
sn.OverridePartition(partition)
@ -212,7 +194,7 @@ func (s *Service) handleUpsert(
return s.handleUpdateService(peerName, partition, sn, csn)
case pbpeering.TypeURLRoots:
case pbpeerstream.TypeURLRoots:
roots := &pbpeering.PeeringTrustBundle{}
if err := ptypes.UnmarshalAny(resource, roots); err != nil {
return fmt.Errorf("failed to unmarshal resource: %w", err)
@ -233,14 +215,14 @@ func (s *Service) handleUpsert(
// On a DELETE event:
// - A reconciliation against nil or empty input pbNodes leads to deleting all stored catalog resources
// associated with the service name.
func (s *Service) handleUpdateService(
func (s *Server) handleUpdateService(
peerName string,
partition string,
sn structs.ServiceName,
pbNodes *pbservice.IndexedCheckServiceNodes,
) error {
// Capture instances in the state store for reconciliation later.
_, storedInstances, err := s.Backend.Store().CheckServiceNodes(nil, sn.Name, &sn.EnterpriseMeta, peerName)
_, storedInstances, err := s.GetStore().CheckServiceNodes(nil, sn.Name, &sn.EnterpriseMeta, peerName)
if err != nil {
return fmt.Errorf("failed to read imported services: %w", err)
}
@ -256,14 +238,14 @@ func (s *Service) handleUpdateService(
for _, nodeSnap := range snap.Nodes {
// First register the node
req := nodeSnap.Node.ToRegisterRequest()
if err := s.Backend.Apply().CatalogRegister(&req); err != nil {
if err := s.Backend.CatalogRegister(&req); err != nil {
return fmt.Errorf("failed to register node: %w", err)
}
// Then register all services on that node
for _, svcSnap := range nodeSnap.Services {
req.Service = svcSnap.Service
if err := s.Backend.Apply().CatalogRegister(&req); err != nil {
if err := s.Backend.CatalogRegister(&req); err != nil {
return fmt.Errorf("failed to register service: %w", err)
}
}
@ -278,7 +260,7 @@ func (s *Service) handleUpdateService(
}
req.Checks = chks
if err := s.Backend.Apply().CatalogRegister(&req); err != nil {
if err := s.Backend.CatalogRegister(&req); err != nil {
return fmt.Errorf("failed to register check: %w", err)
}
}
@ -315,7 +297,7 @@ func (s *Service) handleUpdateService(
// instance is not in the snapshot either, since a service instance can't
// exist without a node.
// This will also delete all service checks.
err := s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
err := s.Backend.CatalogDeregister(&structs.DeregisterRequest{
Node: csn.Node.Node,
ServiceID: csn.Service.ID,
EnterpriseMeta: csn.Service.EnterpriseMeta,
@ -335,7 +317,7 @@ func (s *Service) handleUpdateService(
// Delete the service instance if not in the snapshot.
sid := csn.Service.CompoundServiceID()
if _, ok := snap.Nodes[csn.Node.ID].Services[sid]; !ok {
err := s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
err := s.Backend.CatalogDeregister(&structs.DeregisterRequest{
Node: csn.Node.Node,
ServiceID: csn.Service.ID,
EnterpriseMeta: csn.Service.EnterpriseMeta,
@ -369,7 +351,7 @@ func (s *Service) handleUpdateService(
// If the check isn't a node check then it's a service check.
// Service checks that were not present can be deleted immediately because
// checks for a given service ID will only be attached to a single CheckServiceNode.
err := s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
err := s.Backend.CatalogDeregister(&structs.DeregisterRequest{
Node: chk.Node,
CheckID: chk.CheckID,
EnterpriseMeta: chk.EnterpriseMeta,
@ -387,7 +369,7 @@ func (s *Service) handleUpdateService(
// Delete all deduplicated node checks.
for chk := range deletedNodeChecks {
nodeMeta := structs.NodeEnterpriseMetaInPartition(sn.PartitionOrDefault())
err := s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
err := s.Backend.CatalogDeregister(&structs.DeregisterRequest{
Node: chk.node,
CheckID: chk.checkID,
EnterpriseMeta: *nodeMeta,
@ -402,7 +384,7 @@ func (s *Service) handleUpdateService(
// Delete any nodes that do not have any other services registered on them.
for node := range unusedNodes {
nodeMeta := structs.NodeEnterpriseMetaInPartition(sn.PartitionOrDefault())
_, ns, err := s.Backend.Store().NodeServices(nil, node, nodeMeta, peerName)
_, ns, err := s.GetStore().NodeServices(nil, node, nodeMeta, peerName)
if err != nil {
return fmt.Errorf("failed to query services on node: %w", err)
}
@ -412,7 +394,7 @@ func (s *Service) handleUpdateService(
}
// All services on the node were deleted, so the node is also cleaned up.
err = s.Backend.Apply().CatalogDeregister(&structs.DeregisterRequest{
err = s.Backend.CatalogDeregister(&structs.DeregisterRequest{
Node: node,
PeerName: peerName,
EnterpriseMeta: *nodeMeta,
@ -425,7 +407,7 @@ func (s *Service) handleUpdateService(
return nil
}
func (s *Service) handleUpsertRoots(
func (s *Server) handleUpsertRoots(
peerName string,
partition string,
trustBundle *pbpeering.PeeringTrustBundle,
@ -437,17 +419,17 @@ func (s *Service) handleUpsertRoots(
req := &pbpeering.PeeringTrustBundleWriteRequest{
PeeringTrustBundle: trustBundle,
}
return s.Backend.Apply().PeeringTrustBundleWrite(req)
return s.Backend.PeeringTrustBundleWrite(req)
}
func (s *Service) handleDelete(
func (s *Server) handleDelete(
peerName string,
partition string,
resourceURL string,
resourceID string,
) error {
switch resourceURL {
case pbpeering.TypeURLService:
case pbpeerstream.TypeURLService:
sn := structs.ServiceNameFromString(resourceID)
sn.OverridePartition(partition)
return s.handleUpdateService(peerName, partition, sn, nil)
@ -457,7 +439,14 @@ func (s *Service) handleDelete(
}
}
func makeReply(resourceURL, nonce string, errCode code.Code, errMsg string) *pbpeering.ReplicationMessage {
func makeACKReply(resourceURL, nonce string) *pbpeerstream.ReplicationMessage {
return makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{
ResourceURL: resourceURL,
ResponseNonce: nonce,
})
}
func makeNACKReply(resourceURL, nonce string, errCode code.Code, errMsg string) *pbpeerstream.ReplicationMessage {
var rpcErr *pbstatus.Status
if errCode != code.Code_OK || errMsg != "" {
rpcErr = &pbstatus.Status{
@ -466,14 +455,27 @@ func makeReply(resourceURL, nonce string, errCode code.Code, errMsg string) *pbp
}
}
// TODO: shouldn't this be response?
return &pbpeering.ReplicationMessage{
Payload: &pbpeering.ReplicationMessage_Request_{
Request: &pbpeering.ReplicationMessage_Request{
ResourceURL: resourceURL,
Nonce: nonce,
Error: rpcErr,
},
return makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{
ResourceURL: resourceURL,
ResponseNonce: nonce,
Error: rpcErr,
})
}
// makeReplicationRequest is a convenience method to make a Request-type ReplicationMessage.
func makeReplicationRequest(req *pbpeerstream.ReplicationMessage_Request) *pbpeerstream.ReplicationMessage {
return &pbpeerstream.ReplicationMessage{
Payload: &pbpeerstream.ReplicationMessage_Request_{
Request: req,
},
}
}
// makeReplicationResponse is a convenience method to make a Response-type ReplicationMessage.
func makeReplicationResponse(resp *pbpeerstream.ReplicationMessage_Response) *pbpeerstream.ReplicationMessage {
return &pbpeerstream.ReplicationMessage{
Payload: &pbpeerstream.ReplicationMessage_Response_{
Response: resp,
},
}
}
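
For orientation, here is a minimal in-package sketch of how the ACK/NACK helpers above pair up on the receive path; `applyErr` stands in for a failure from `handleUpsert`/`handleDelete` and is an assumption, not code from this commit.

```go
// Sketch: ACK when the resource applied cleanly, NACK with a status otherwise.
reply := makeACKReply(pbpeerstream.TypeURLService, resp.Nonce)
if applyErr != nil { // hypothetical error from handleUpsert/handleDelete
	reply = makeNACKReply(pbpeerstream.TypeURLService, resp.Nonce,
		code.Code_INTERNAL, applyErr.Error())
}
// Either way the reply is a ReplicationMessage carrying a Request payload
// whose ResponseNonce echoes the nonce of the response being acknowledged.
```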

View File

@ -0,0 +1,101 @@
package peerstream
import (
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
"google.golang.org/grpc"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
)
// TODO(peering): fix up these interfaces to be more testable now that they are
// extracted from private peering
type Server struct {
Config
}
type Config struct {
Backend Backend
Tracker *Tracker
GetStore func() StateStore
Logger hclog.Logger
ACLResolver ACLResolver
// Datacenter of the Consul server this gRPC server is hosted on
Datacenter string
ConnectEnabled bool
}
//go:generate mockery --name ACLResolver --inpackage
type ACLResolver interface {
ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (resolver.Result, error)
}
func NewServer(cfg Config) *Server {
requireNotNil(cfg.Backend, "Backend")
requireNotNil(cfg.Tracker, "Tracker")
requireNotNil(cfg.GetStore, "GetStore")
requireNotNil(cfg.Logger, "Logger")
// requireNotNil(cfg.ACLResolver, "ACLResolver") // TODO(peering): reenable check when ACLs are required
if cfg.Datacenter == "" {
panic("Datacenter is required")
}
return &Server{
Config: cfg,
}
}
func requireNotNil(v interface{}, name string) {
if v == nil {
panic(name + " is required")
}
}
var _ pbpeerstream.PeerStreamServiceServer = (*Server)(nil)
func (s *Server) Register(grpcServer *grpc.Server) {
pbpeerstream.RegisterPeerStreamServiceServer(grpcServer, s)
}
type Backend interface {
Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error)
// IsLeader indicates whether the consul server is in a leader state or not.
IsLeader() bool
// SetLeaderAddress is called on a raft.LeaderObservation in a goroutine
// in the consul server; see trackLeaderChanges()
SetLeaderAddress(string)
// GetLeaderAddress provides the best hint for the current address of the
// leader. There is no guarantee that this is the actual address of the
// leader.
GetLeaderAddress() string
PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error
PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error
CatalogRegister(req *structs.RegisterRequest) error
CatalogDeregister(req *structs.DeregisterRequest) error
}
// StateStore provides a read-only interface for querying Peering data.
type StateStore interface {
PeeringRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.Peering, error)
PeeringReadByID(ws memdb.WatchSet, id string) (uint64, *pbpeering.Peering, error)
PeeringList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error)
PeeringTrustBundleRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.PeeringTrustBundle, error)
PeeringTrustBundleList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error)
ExportedServicesForPeer(ws memdb.WatchSet, peerID, dc string) (uint64, *structs.ExportedServiceList, error)
ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error)
CheckServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error)
NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeServices, error)
CAConfig(ws memdb.WatchSet) (uint64, *structs.CAConfiguration, error)
TrustBundleListByService(ws memdb.WatchSet, service, dc string, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error)
AbandonCh() <-chan struct{}
}
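
To make the new package boundaries concrete, a hedged wiring sketch follows; the import path and the `backend`/`store` values are assumptions about how a Consul server would satisfy these interfaces, not the actual wiring in this commit.

```go
package main

import (
	"google.golang.org/grpc"

	"github.com/hashicorp/go-hclog"

	// Assumed import path for the package shown above.
	"github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
)

func registerPeerStream(g *grpc.Server, backend peerstream.Backend, store peerstream.StateStore) {
	srv := peerstream.NewServer(peerstream.Config{
		Backend:        backend, // assumed implementation of the Backend interface
		Tracker:        peerstream.NewTracker(),
		GetStore:       func() peerstream.StateStore { return store },
		Logger:         hclog.Default().Named("grpc-api.peerstream"),
		Datacenter:     "dc1", // required: NewServer panics when empty
		ConnectEnabled: true,
	})
	srv.Register(g)
}
```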

View File

@ -0,0 +1,422 @@
package peerstream
import (
"context"
"fmt"
"io"
"strings"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/hashicorp/go-hclog"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"github.com/hashicorp/consul/agent/connect"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
)
type BidirectionalStream interface {
Send(*pbpeerstream.ReplicationMessage) error
Recv() (*pbpeerstream.ReplicationMessage, error)
Context() context.Context
}
// StreamResources handles incoming streaming connections.
func (s *Server) StreamResources(stream pbpeerstream.PeerStreamService_StreamResourcesServer) error {
logger := s.Logger.Named("stream-resources").With("request_id", external.TraceID())
logger.Trace("Started processing request")
defer logger.Trace("Finished processing request")
// NOTE: this code should have similar error handling to the new-request
// handling code in HandleStream()
if !s.Backend.IsLeader() {
// we are not the leader so we will hang up on the dialer
logger.Error("cannot establish a peering stream on a follower node")
st, err := grpcstatus.New(codes.FailedPrecondition,
"cannot establish a peering stream on a follower node").WithDetails(
&pbpeerstream.LeaderAddress{Address: s.Backend.GetLeaderAddress()})
if err != nil {
logger.Error(fmt.Sprintf("failed to marshal the leader address in response; err: %v", err))
return grpcstatus.Error(codes.FailedPrecondition, "cannot establish a peering stream on a follower node")
} else {
return st.Err()
}
}
// Initial message on a new stream must be a new subscription request.
first, err := stream.Recv()
if err != nil {
logger.Error("failed to establish stream", "error", err)
return err
}
// TODO(peering) Make request contain a list of resources, so that roots and services can be
// subscribed to with a single request. See:
// https://github.com/envoyproxy/data-plane-api/blob/main/envoy/service/discovery/v3/discovery.proto#L46
req := first.GetRequest()
if req == nil {
return grpcstatus.Error(codes.InvalidArgument, "first message when initiating a peering must be a subscription request")
}
logger.Trace("received initial replication request from peer")
logTraceRecv(logger, req)
if req.PeerID == "" {
return grpcstatus.Error(codes.InvalidArgument, "initial subscription request must specify a PeerID")
}
if req.ResponseNonce != "" {
return grpcstatus.Error(codes.InvalidArgument, "initial subscription request must not contain a nonce")
}
if req.Error != nil {
return grpcstatus.Error(codes.InvalidArgument, "initial subscription request must not contain an error")
}
if !pbpeerstream.KnownTypeURL(req.ResourceURL) {
return grpcstatus.Errorf(codes.InvalidArgument, "subscription request to unknown resource URL: %s", req.ResourceURL)
}
_, p, err := s.GetStore().PeeringReadByID(nil, req.PeerID)
if err != nil {
logger.Error("failed to look up peer", "peer_id", req.PeerID, "error", err)
return grpcstatus.Error(codes.Internal, "failed to find PeerID: "+req.PeerID)
}
if p == nil {
return grpcstatus.Error(codes.InvalidArgument, "initial subscription for unknown PeerID: "+req.PeerID)
}
// TODO(peering): If the peering is marked as deleted, send a Terminated message and return
// TODO(peering): Store subscription request so that an event publisher can separately handle pushing messages for it
logger.Info("accepted initial replication request from peer", "peer_id", p.ID)
if p.PeerID != "" {
return grpcstatus.Error(codes.InvalidArgument, "expected PeerID to be empty; the wrong end of peering is being dialed")
}
streamReq := HandleStreamRequest{
LocalID: p.ID,
RemoteID: "",
PeerName: p.Name,
Partition: p.Partition,
Stream: stream,
}
err = s.HandleStream(streamReq)
// A nil error indicates that the peering was deleted and the stream needs to be gracefully shut down.
if err == nil {
s.DrainStream(streamReq)
return nil
}
logger.Error("error handling stream", "peer_name", p.Name, "peer_id", req.PeerID, "error", err)
return err
}
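
On the dialing side, the `LeaderAddress` detail attached above can be recovered from the gRPC status. A sketch, assuming the dialer owns the reconnect logic (`redialTo` is hypothetical):

```go
if st, ok := grpcstatus.FromError(err); ok && st.Code() == codes.FailedPrecondition {
	for _, detail := range st.Details() {
		if la, ok := detail.(*pbpeerstream.LeaderAddress); ok && la.Address != "" {
			redialTo(la.Address) // hypothetical helper that re-dials the leader
		}
	}
}
```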
type HandleStreamRequest struct {
// LocalID is the UUID for the peering in the local Consul datacenter.
LocalID string
// RemoteID is the UUID for the peering from the perspective of the peer.
RemoteID string
// PeerName is the name of the peering.
PeerName string
// Partition is the local partition associated with the peer.
Partition string
// Stream is the open stream to the peer cluster.
Stream BidirectionalStream
}
func (r HandleStreamRequest) WasDialed() bool {
return r.RemoteID == ""
}
// DrainStream attempts to gracefully drain the stream when the connection is going to be torn down.
// Tearing down the connection too quickly can lead to our peer receiving a context cancellation error before the stream termination message.
// Handling the termination message is important to set the expectation that the peering will not be reestablished unless recreated.
func (s *Server) DrainStream(req HandleStreamRequest) {
for {
// Ensure that we read until an error, or the peer has nothing more to send.
if _, err := req.Stream.Recv(); err != nil {
if err != io.EOF {
s.Logger.Warn("failed to tear down stream gracefully: peer may not have received termination message",
"peer_name", req.PeerName, "peer_id", req.LocalID, "error", err)
}
break
}
// Since the peering is being torn down we discard all replication messages without an error.
// We want to avoid importing new data at this point.
}
}
// The localID provided is the locally-generated identifier for the peering.
// The remoteID is an identifier that the remote peer recognizes for the peering.
func (s *Server) HandleStream(streamReq HandleStreamRequest) error {
// TODO: pass logger down from caller?
logger := s.Logger.Named("stream").
With("peer_name", streamReq.PeerName).
With("peer_id", streamReq.LocalID).
With("dialed", streamReq.WasDialed())
logger.Trace("handling stream for peer")
status, err := s.Tracker.Connected(streamReq.LocalID)
if err != nil {
return fmt.Errorf("failed to register stream: %v", err)
}
// TODO(peering) Also need to clear subscriptions associated with the peer
defer s.Tracker.Disconnected(streamReq.LocalID)
var trustDomain string
if s.ConnectEnabled {
// Read the TrustDomain up front - we do not allow users to change the ClusterID
// so reading it once at the beginning of the stream is sufficient.
trustDomain, err = getTrustDomain(s.GetStore(), logger)
if err != nil {
return err
}
}
mgr := newSubscriptionManager(
streamReq.Stream.Context(),
logger,
s.Config,
trustDomain,
s.Backend,
s.GetStore,
)
subCh := mgr.subscribe(streamReq.Stream.Context(), streamReq.LocalID, streamReq.PeerName, streamReq.Partition)
sub := makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{
ResourceURL: pbpeerstream.TypeURLService,
PeerID: streamReq.RemoteID,
})
logTraceSend(logger, sub)
if err := streamReq.Stream.Send(sub); err != nil {
if err == io.EOF {
logger.Info("stream ended by peer")
status.TrackReceiveError(err.Error())
return nil
}
// TODO(peering) Test error handling in calls to Send/Recv
status.TrackSendError(err.Error())
return fmt.Errorf("failed to send to stream: %v", err)
}
// TODO(peering): Should this be buffered?
recvChan := make(chan *pbpeerstream.ReplicationMessage)
go func() {
defer close(recvChan)
for {
msg, err := streamReq.Stream.Recv()
if err == nil {
logTraceRecv(logger, msg)
recvChan <- msg
continue
}
if err == io.EOF {
logger.Info("stream ended by peer")
status.TrackReceiveError(err.Error())
return
}
logger.Error("failed to receive from stream", "error", err)
status.TrackReceiveError(err.Error())
return
}
}()
for {
select {
// When the doneCh is closed, the peering was deleted locally.
case <-status.Done():
logger.Info("ending stream")
term := &pbpeerstream.ReplicationMessage{
Payload: &pbpeerstream.ReplicationMessage_Terminated_{
Terminated: &pbpeerstream.ReplicationMessage_Terminated{},
},
}
logTraceSend(logger, term)
if err := streamReq.Stream.Send(term); err != nil {
status.TrackSendError(err.Error())
return fmt.Errorf("failed to send to stream: %v", err)
}
logger.Trace("deleting stream status")
s.Tracker.DeleteStatus(streamReq.LocalID)
return nil
case msg, open := <-recvChan:
if !open {
logger.Trace("no longer receiving data on the stream")
return nil
}
// NOTE: this code should have similar error handling to the
// initial handling code in StreamResources()
if !s.Backend.IsLeader() {
// we are not the leader anymore so we will hang up on the dialer
logger.Error("node is not a leader anymore; cannot continue streaming")
st, err := grpcstatus.New(codes.FailedPrecondition,
"node is not a leader anymore; cannot continue streaming").WithDetails(
&pbpeerstream.LeaderAddress{Address: s.Backend.GetLeaderAddress()})
if err != nil {
logger.Error(fmt.Sprintf("failed to marshal the leader address in response; err: %v", err))
return grpcstatus.Error(codes.FailedPrecondition, "node is not a leader anymore; cannot continue streaming")
} else {
return st.Err()
}
}
if req := msg.GetRequest(); req != nil {
if !pbpeerstream.KnownTypeURL(req.ResourceURL) {
return grpcstatus.Errorf(codes.InvalidArgument, "subscription request to unknown resource URL: %s", req.ResourceURL)
}
switch {
case req.ResponseNonce == "":
// TODO(peering): This can happen on a client peer since they don't try to receive subscriptions before entering HandleStream.
// Should change that behavior or only allow it that one time.
case req.Error != nil && (req.Error.Code != int32(code.Code_OK) || req.Error.Message != ""):
logger.Warn("client peer was unable to apply resource", "code", req.Error.Code, "error", req.Error.Message)
status.TrackNack(fmt.Sprintf("client peer was unable to apply resource: %s", req.Error.Message))
default:
status.TrackAck()
}
continue
}
if resp := msg.GetResponse(); resp != nil {
// TODO(peering): Ensure there's a nonce
reply, err := s.processResponse(streamReq.PeerName, streamReq.Partition, resp)
if err != nil {
logger.Error("failed to persist resource", "resourceURL", resp.ResourceURL, "resourceID", resp.ResourceID)
status.TrackReceiveError(err.Error())
} else {
status.TrackReceiveSuccess()
}
logTraceSend(logger, reply)
if err := streamReq.Stream.Send(reply); err != nil {
status.TrackSendError(err.Error())
return fmt.Errorf("failed to send to stream: %v", err)
}
continue
}
if term := msg.GetTerminated(); term != nil {
logger.Info("peering was deleted by our peer: marking peering as terminated and cleaning up imported resources")
// Once marked as terminated, a separate deferred deletion routine will clean up imported resources.
if err := s.Backend.PeeringTerminateByID(&pbpeering.PeeringTerminateByIDRequest{ID: streamReq.LocalID}); err != nil {
logger.Error("failed to mark peering as terminated: %w", err)
}
return nil
}
case update := <-subCh:
var resp *pbpeerstream.ReplicationMessage_Response
switch {
case strings.HasPrefix(update.CorrelationID, subExportedService):
resp, err = makeServiceResponse(logger, update)
if err != nil {
// Log the error and skip this response to avoid locking up peering due to a bad update event.
logger.Error("failed to create service response", "error", err)
continue
}
case strings.HasPrefix(update.CorrelationID, subMeshGateway):
// TODO(Peering): figure out how to sync this separately
case update.CorrelationID == subCARoot:
resp, err = makeCARootsResponse(logger, update)
if err != nil {
// Log the error and skip this response to avoid locking up peering due to a bad update event.
logger.Error("failed to create ca roots response", "error", err)
continue
}
default:
logger.Warn("unrecognized update type from subscription manager: " + update.CorrelationID)
continue
}
if resp == nil {
continue
}
replResp := makeReplicationResponse(resp)
logTraceSend(logger, replResp)
if err := streamReq.Stream.Send(replResp); err != nil {
status.TrackSendError(err.Error())
return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err)
}
}
}
}
func getTrustDomain(store StateStore, logger hclog.Logger) (string, error) {
_, cfg, err := store.CAConfig(nil)
switch {
case err != nil:
logger.Error("failed to read Connect CA Config", "error", err)
return "", grpcstatus.Error(codes.Internal, "failed to read Connect CA Config")
case cfg == nil:
logger.Warn("cannot begin stream because Connect CA is not yet initialized")
return "", grpcstatus.Error(codes.FailedPrecondition, "Connect CA is not yet initialized")
}
return connect.SpiffeIDSigningForCluster(cfg.ClusterID).Host(), nil
}
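
The value returned here is the cluster's SPIFFE trust domain. A short in-package sketch of what that looks like, using the test ClusterID that appears later in this commit:

```go
trustDomain, err := getTrustDomain(s.GetStore(), logger)
if err != nil {
	return err
}
// For ClusterID "11111111-2222-3333-4444-555555555555" this yields
// "11111111-2222-3333-4444-555555555555.consul", the host segment of the
// spiffe:// IDs asserted in the subscription manager tests below.
_ = trustDomain
```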
func (s *Server) StreamStatus(peer string) (resp Status, found bool) {
return s.Tracker.StreamStatus(peer)
}
// ConnectedStreams returns a map of connected stream IDs to the corresponding channel for tearing them down.
func (s *Server) ConnectedStreams() map[string]chan struct{} {
return s.Tracker.ConnectedStreams()
}
func logTraceRecv(logger hclog.Logger, pb proto.Message) {
logTraceProto(logger, pb, true)
}
func logTraceSend(logger hclog.Logger, pb proto.Message) {
logTraceProto(logger, pb, false)
}
func logTraceProto(logger hclog.Logger, pb proto.Message, received bool) {
if !logger.IsTrace() {
return
}
dir := "sent"
if received {
dir = "received"
}
m := jsonpb.Marshaler{
Indent: " ",
}
out, err := m.MarshalToString(pb)
if err != nil {
out = "<ERROR: " + err.Error() + ">"
}
logger.Trace("replication message", "direction", dir, "protobuf", out)
}
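
These helpers return early unless trace logging is enabled, so the jsonpb marshalling cost is only paid when someone is looking. A sketch of turning them on:

```go
logger := hclog.New(&hclog.LoggerOptions{
	Name:  "peerstream",
	Level: hclog.Trace, // below Trace, logTraceSend/logTraceRecv are no-ops
})
logTraceSend(logger, makeACKReply(pbpeerstream.TypeURLService, "nonce-1"))
```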

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"fmt"
@ -6,86 +6,94 @@ import (
"time"
)
// streamTracker contains a map of (PeerID -> StreamStatus).
// Tracker contains a map of (PeerID -> Status).
// As streams are opened and closed we track details about their status.
type streamTracker struct {
type Tracker struct {
mu sync.RWMutex
streams map[string]*lockableStreamStatus
streams map[string]*MutableStatus
// timeNow is a shim for testing.
timeNow func() time.Time
}
func newStreamTracker() *streamTracker {
return &streamTracker{
streams: make(map[string]*lockableStreamStatus),
func NewTracker() *Tracker {
return &Tracker{
streams: make(map[string]*MutableStatus),
timeNow: time.Now,
}
}
// connected registers a stream for a given peer, and marks it as connected.
func (t *Tracker) SetClock(clock func() time.Time) {
if clock == nil {
t.timeNow = time.Now
} else {
t.timeNow = clock
}
}
// Connected registers a stream for a given peer, and marks it as connected.
// It also enforces that there is only one active stream for a peer.
func (t *streamTracker) connected(id string) (*lockableStreamStatus, error) {
func (t *Tracker) Connected(id string) (*MutableStatus, error) {
t.mu.Lock()
defer t.mu.Unlock()
status, ok := t.streams[id]
if !ok {
status = newLockableStreamStatus(t.timeNow)
status = newMutableStatus(t.timeNow)
t.streams[id] = status
return status, nil
}
if status.connected() {
if status.IsConnected() {
return nil, fmt.Errorf("there is an active stream for the given PeerID %q", id)
}
status.trackConnected()
status.TrackConnected()
return status, nil
}
// disconnected ensures that if a peer id's stream status is tracked, it is marked as disconnected.
func (t *streamTracker) disconnected(id string) {
// Disconnected ensures that if a peer id's stream status is tracked, it is marked as disconnected.
func (t *Tracker) Disconnected(id string) {
t.mu.Lock()
defer t.mu.Unlock()
if status, ok := t.streams[id]; ok {
status.trackDisconnected()
status.TrackDisconnected()
}
}
func (t *streamTracker) streamStatus(id string) (resp StreamStatus, found bool) {
func (t *Tracker) StreamStatus(id string) (resp Status, found bool) {
t.mu.RLock()
defer t.mu.RUnlock()
s, ok := t.streams[id]
if !ok {
return StreamStatus{}, false
return Status{}, false
}
return s.status(), true
return s.GetStatus(), true
}
func (t *streamTracker) connectedStreams() map[string]chan struct{} {
func (t *Tracker) ConnectedStreams() map[string]chan struct{} {
t.mu.RLock()
defer t.mu.RUnlock()
resp := make(map[string]chan struct{})
for peer, status := range t.streams {
if status.connected() {
if status.IsConnected() {
resp[peer] = status.doneCh
}
}
return resp
}
func (t *streamTracker) deleteStatus(id string) {
func (t *Tracker) DeleteStatus(id string) {
t.mu.Lock()
defer t.mu.Unlock()
delete(t.streams, id)
}
type lockableStreamStatus struct {
type MutableStatus struct {
mu sync.RWMutex
// timeNow is a shim for testing.
@ -95,12 +103,12 @@ type lockableStreamStatus struct {
// to the peer before the stream's context is cancelled.
doneCh chan struct{}
StreamStatus
Status
}
// StreamStatus contains information about the replication stream to a peer cluster.
// Status contains information about the replication stream to a peer cluster.
// TODO(peering): There are a lot of fields here...
type StreamStatus struct {
type Status struct {
// Connected is true when there is an open stream for the peer.
Connected bool
@ -136,9 +144,9 @@ type StreamStatus struct {
LastReceiveErrorMessage string
}
func newLockableStreamStatus(now func() time.Time) *lockableStreamStatus {
return &lockableStreamStatus{
StreamStatus: StreamStatus{
func newMutableStatus(now func() time.Time) *MutableStatus {
return &MutableStatus{
Status: Status{
Connected: true,
},
timeNow: now,
@ -146,54 +154,58 @@ func newLockableStreamStatus(now func() time.Time) *lockableStreamStatus {
}
}
func (s *lockableStreamStatus) trackAck() {
func (s *MutableStatus) Done() <-chan struct{} {
return s.doneCh
}
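
Exposing the done channel is what lets a controller end a stream gracefully: closing it fires the `<-status.Done()` case in `HandleStream`, which sends a Terminated message before returning. A hedged sketch (`srv` and `stalePeerID` are assumptions):

```go
for id, doneCh := range srv.ConnectedStreams() { // srv is a *Server (assumption)
	if id == stalePeerID { // hypothetical selection criterion
		close(doneCh)
	}
}
```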
func (s *MutableStatus) TrackAck() {
s.mu.Lock()
s.LastAck = s.timeNow().UTC()
s.mu.Unlock()
}
func (s *lockableStreamStatus) trackSendError(error string) {
func (s *MutableStatus) TrackSendError(error string) {
s.mu.Lock()
s.LastSendError = s.timeNow().UTC()
s.LastSendErrorMessage = error
s.mu.Unlock()
}
func (s *lockableStreamStatus) trackReceiveSuccess() {
func (s *MutableStatus) TrackReceiveSuccess() {
s.mu.Lock()
s.LastReceiveSuccess = s.timeNow().UTC()
s.mu.Unlock()
}
func (s *lockableStreamStatus) trackReceiveError(error string) {
func (s *MutableStatus) TrackReceiveError(error string) {
s.mu.Lock()
s.LastReceiveError = s.timeNow().UTC()
s.LastReceiveErrorMessage = error
s.mu.Unlock()
}
func (s *lockableStreamStatus) trackNack(msg string) {
func (s *MutableStatus) TrackNack(msg string) {
s.mu.Lock()
s.LastNack = s.timeNow().UTC()
s.LastNackMessage = msg
s.mu.Unlock()
}
func (s *lockableStreamStatus) trackConnected() {
func (s *MutableStatus) TrackConnected() {
s.mu.Lock()
s.Connected = true
s.DisconnectTime = time.Time{}
s.mu.Unlock()
}
func (s *lockableStreamStatus) trackDisconnected() {
func (s *MutableStatus) TrackDisconnected() {
s.mu.Lock()
s.Connected = false
s.DisconnectTime = s.timeNow().UTC()
s.mu.Unlock()
}
func (s *lockableStreamStatus) connected() bool {
func (s *MutableStatus) IsConnected() bool {
var resp bool
s.mu.RLock()
@ -203,9 +215,9 @@ func (s *lockableStreamStatus) connected() bool {
return resp
}
func (s *lockableStreamStatus) status() StreamStatus {
func (s *MutableStatus) GetStatus() Status {
s.mu.RLock()
copy := s.StreamStatus
copy := s.Status
s.mu.RUnlock()
return copy
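
A compact sketch of the lifecycle these methods support (the UUID is arbitrary, borrowed from the test below):

```go
tracker := NewTracker()
status, err := tracker.Connected("63b60245-c475-426b-b314-4588d210859d")
if err != nil {
	return err // a second Connected call for the same ID fails while one is active
}
status.TrackAck() // stamps LastAck using the tracker-provided clock
if st, ok := tracker.StreamStatus("63b60245-c475-426b-b314-4588d210859d"); ok {
	_ = st.Connected // true until Disconnected is called
}
tracker.Disconnected("63b60245-c475-426b-b314-4588d210859d")
```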

View File

@ -1,16 +1,17 @@
package peering
package peerstream
import (
"sort"
"testing"
"time"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/sdk/testutil"
)
func TestStreamTracker_EnsureConnectedDisconnected(t *testing.T) {
tracker := newStreamTracker()
func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
tracker := NewTracker()
peerID := "63b60245-c475-426b-b314-4588d210859d"
it := incrementalTime{
@ -19,25 +20,25 @@ func TestStreamTracker_EnsureConnectedDisconnected(t *testing.T) {
tracker.timeNow = it.Now
var (
statusPtr *lockableStreamStatus
statusPtr *MutableStatus
err error
)
testutil.RunStep(t, "new stream", func(t *testing.T) {
statusPtr, err = tracker.connected(peerID)
statusPtr, err = tracker.Connected(peerID)
require.NoError(t, err)
expect := StreamStatus{
expect := Status{
Connected: true,
}
status, ok := tracker.streamStatus(peerID)
status, ok := tracker.StreamStatus(peerID)
require.True(t, ok)
require.Equal(t, expect, status)
})
testutil.RunStep(t, "duplicate gets rejected", func(t *testing.T) {
_, err := tracker.connected(peerID)
_, err := tracker.Connected(peerID)
require.Error(t, err)
require.Contains(t, err.Error(), `there is an active stream for the given PeerID "63b60245-c475-426b-b314-4588d210859d"`)
})
@ -46,14 +47,14 @@ func TestStreamTracker_EnsureConnectedDisconnected(t *testing.T) {
var lastSuccess time.Time
testutil.RunStep(t, "stream updated", func(t *testing.T) {
statusPtr.trackAck()
statusPtr.TrackAck()
sequence++
status, ok := tracker.streamStatus(peerID)
status, ok := tracker.StreamStatus(peerID)
require.True(t, ok)
lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC()
expect := StreamStatus{
expect := Status{
Connected: true,
LastAck: lastSuccess,
}
@ -61,58 +62,58 @@ func TestStreamTracker_EnsureConnectedDisconnected(t *testing.T) {
})
testutil.RunStep(t, "disconnect", func(t *testing.T) {
tracker.disconnected(peerID)
tracker.Disconnected(peerID)
sequence++
expect := StreamStatus{
expect := Status{
Connected: false,
DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(),
LastAck: lastSuccess,
}
status, ok := tracker.streamStatus(peerID)
status, ok := tracker.StreamStatus(peerID)
require.True(t, ok)
require.Equal(t, expect, status)
})
testutil.RunStep(t, "re-connect", func(t *testing.T) {
_, err := tracker.connected(peerID)
_, err := tracker.Connected(peerID)
require.NoError(t, err)
expect := StreamStatus{
expect := Status{
Connected: true,
LastAck: lastSuccess,
// DisconnectTime gets cleared on re-connect.
}
status, ok := tracker.streamStatus(peerID)
status, ok := tracker.StreamStatus(peerID)
require.True(t, ok)
require.Equal(t, expect, status)
})
testutil.RunStep(t, "delete", func(t *testing.T) {
tracker.deleteStatus(peerID)
tracker.DeleteStatus(peerID)
status, ok := tracker.streamStatus(peerID)
status, ok := tracker.StreamStatus(peerID)
require.False(t, ok)
require.Zero(t, status)
})
}
func TestStreamTracker_connectedStreams(t *testing.T) {
func TestTracker_connectedStreams(t *testing.T) {
type testCase struct {
name string
setup func(t *testing.T, s *streamTracker)
setup func(t *testing.T, s *Tracker)
expect []string
}
run := func(t *testing.T, tc testCase) {
tracker := newStreamTracker()
tracker := NewTracker()
if tc.setup != nil {
tc.setup(t, tracker)
}
streams := tracker.connectedStreams()
streams := tracker.ConnectedStreams()
var keys []string
for key := range streams {
@ -130,25 +131,25 @@ func TestStreamTracker_connectedStreams(t *testing.T) {
},
{
name: "all streams active",
setup: func(t *testing.T, s *streamTracker) {
_, err := s.connected("foo")
setup: func(t *testing.T, s *Tracker) {
_, err := s.Connected("foo")
require.NoError(t, err)
_, err = s.connected("bar")
_, err = s.Connected("bar")
require.NoError(t, err)
},
expect: []string{"bar", "foo"},
},
{
name: "mixed active and inactive",
setup: func(t *testing.T, s *streamTracker) {
status, err := s.connected("foo")
setup: func(t *testing.T, s *Tracker) {
status, err := s.Connected("foo")
require.NoError(t, err)
// Mark foo as disconnected to avoid showing it as an active stream
status.trackDisconnected()
status.TrackDisconnected()
_, err = s.connected("bar")
_, err = s.Connected("bar")
require.NoError(t, err)
},
expect: []string{"bar"},

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"context"
@ -21,7 +21,7 @@ import (
func (m *subscriptionManager) notifyExportedServicesForPeerID(ctx context.Context, state *subscriptionState, peerID string) {
// syncSubscriptionsAndBlock ensures that the subscriptions to the subscription backend
// match the list of services exported to the peer.
m.syncViaBlockingQuery(ctx, "exported-services", func(ctx context.Context, store Store, ws memdb.WatchSet) (interface{}, error) {
m.syncViaBlockingQuery(ctx, "exported-services", func(ctx context.Context, store StateStore, ws memdb.WatchSet) (interface{}, error) {
// Get exported services for peer id
_, list, err := store.ExportedServicesForPeer(ws, peerID, m.config.Datacenter)
if err != nil {
@ -34,7 +34,7 @@ func (m *subscriptionManager) notifyExportedServicesForPeerID(ctx context.Contex
// TODO: add a new streaming subscription type to list-by-kind-and-partition since we're getting evictions
func (m *subscriptionManager) notifyMeshGatewaysForPartition(ctx context.Context, state *subscriptionState, partition string) {
m.syncViaBlockingQuery(ctx, "mesh-gateways", func(ctx context.Context, store Store, ws memdb.WatchSet) (interface{}, error) {
m.syncViaBlockingQuery(ctx, "mesh-gateways", func(ctx context.Context, store StateStore, ws memdb.WatchSet) (interface{}, error) {
// Fetch our current list of all mesh gateways.
entMeta := structs.DefaultEnterpriseMetaInPartition(partition)
idx, nodes, err := store.ServiceDump(ws, structs.ServiceKindMeshGateway, true, entMeta, structs.DefaultPeerKeyword)
@ -61,7 +61,7 @@ func (m *subscriptionManager) notifyMeshGatewaysForPartition(ctx context.Context
func (m *subscriptionManager) syncViaBlockingQuery(
ctx context.Context,
queryType string,
queryFn func(ctx context.Context, store Store, ws memdb.WatchSet) (interface{}, error),
queryFn func(ctx context.Context, store StateStore, ws memdb.WatchSet) (interface{}, error),
correlationID string,
updateCh chan<- cache.UpdateEvent,
) {
@ -77,7 +77,7 @@ func (m *subscriptionManager) syncViaBlockingQuery(
logger = m.logger.With("queryType", queryType)
}
store := m.backend.Store()
store := m.getStore()
for {
ws := memdb.NewWatchSet()

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"context"
@ -29,7 +29,6 @@ type MaterializedViewStore interface {
type SubscriptionBackend interface {
Subscriber
Store() Store
}
// subscriptionManager handles requests to subscribe to events from an events publisher.
@ -39,6 +38,7 @@ type subscriptionManager struct {
trustDomain string
viewStore MaterializedViewStore
backend SubscriptionBackend
getStore func() StateStore
}
// TODO(peering): Maybe centralize so that there is a single manager per datacenter, rather than per peering.
@ -48,6 +48,7 @@ func newSubscriptionManager(
config Config,
trustDomain string,
backend SubscriptionBackend,
getStore func() StateStore,
) *subscriptionManager {
logger = logger.Named("subscriptions")
store := submatview.NewStore(logger.Named("viewstore"))
@ -59,6 +60,7 @@ func newSubscriptionManager(
trustDomain: trustDomain,
viewStore: store,
backend: backend,
getStore: getStore,
}
}
@ -167,13 +169,6 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
// skip checks since we just generated one from scratch
}
// Scrub raft indexes
for _, instance := range csn.Nodes {
instance.Node.RaftIndex = nil
instance.Service.RaftIndex = nil
// skip checks since we just generated one from scratch
}
id := servicePayloadIDPrefix + strings.TrimPrefix(u.CorrelationID, subExportedService)
// Just ferry this one directly along to the destination.
@ -223,6 +218,9 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
if instance.Service.Connect != nil || instance.Service.Proxy != nil {
instance.Service.Connect = nil
instance.Service.Proxy = nil
// VirtualIPs assigned in this cluster won't make sense on the importing side
delete(instance.Service.TaggedAddresses, structs.TaggedAddressVirtualIP)
}
}
@ -284,6 +282,9 @@ func filterConnectReferences(orig *pbservice.IndexedCheckServiceNodes) {
csn = proto.Clone(csn).(*pbservice.CheckServiceNode)
csn.Service.Connect = nil
csn.Service.Proxy = nil
// VirtualIPs assigned in this cluster won't make sense on the importing side
delete(csn.Service.TaggedAddresses, structs.TaggedAddressVirtualIP)
}
newNodes = append(newNodes, csn)
@ -347,7 +348,7 @@ func (m *subscriptionManager) subscribeCARoots(
// following a snapshot restore) reset idx to ensure we don't skip over the
// new store's events.
select {
case <-m.backend.Store().AbandonCh():
case <-m.getStore().AbandonCh():
idx = 0
default:
}
@ -556,6 +557,12 @@ func createDiscoChainHealth(
trustDomain,
)
gwSpiffeID := connect.SpiffeIDMeshGateway{
Host: trustDomain,
Partition: sn.PartitionOrDefault(),
Datacenter: datacenter,
}
// Create common peer meta.
//
// TODO(peering): should this be replicated by service and not by instance?
@ -563,19 +570,14 @@ func createDiscoChainHealth(
SNI: []string{sni},
SpiffeID: []string{
mainSpiffeIDString,
// Always include the gateway id here to facilitate error-free
// L4/L7 upgrade/downgrade scenarios.
gwSpiffeID.URI().String(),
},
Protocol: info.Protocol,
}
if structs.IsProtocolHTTPLike(info.Protocol) {
gwSpiffeID := connect.SpiffeIDMeshGateway{
Host: trustDomain,
Partition: sn.PartitionOrDefault(),
Datacenter: datacenter,
}
peerMeta.SpiffeID = append(peerMeta.SpiffeID, gwSpiffeID.URI().String())
} else {
if !structs.IsProtocolHTTPLike(info.Protocol) {
for _, target := range info.TCPTargets {
targetSpiffeID := connect.SpiffeIDService{
Host: trustDomain,

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"context"
@ -35,7 +35,9 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
mgr := newSubscriptionManager(ctx, testutil.Logger(t), Config{
Datacenter: "dc1",
ConnectEnabled: true,
}, connect.TestTrustDomain, backend)
}, connect.TestTrustDomain, backend, func() StateStore {
return backend.store
})
subCh := mgr.subscribe(ctx, id, "my-peering", partition)
var (
@ -51,6 +53,19 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
checkEvent(t, got, gatewayCorrID, 0)
})
// Initially add in L4 failover so that later we can test removing it. We
// cannot do it the other way around because it would fail validation to
// remove a target.
backend.ensureConfigEntry(t, &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "mysql",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Service: "failover",
},
},
})
testutil.RunStep(t, "initial export syncs empty instance lists", func(t *testing.T) {
backend.ensureConfigEntry(t, &structs.ExportedServicesConfigEntry{
Name: "default",
@ -260,6 +275,8 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
},
SpiffeID: []string{
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/mysql",
"spiffe://11111111-2222-3333-4444-555555555555.consul/gateway/mesh/dc/dc1",
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/failover",
},
Protocol: "tcp",
},
@ -285,12 +302,6 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
backend.ensureConfigEntry(t, &structs.ServiceResolverConfigEntry{
Kind: structs.ServiceResolver,
Name: "mysql",
Failover: map[string]structs.ServiceResolverFailover{
"*": {
Service: "failover",
Datacenters: []string{"dc2", "dc3"},
},
},
})
// ensure we get updated peer meta
@ -325,52 +336,7 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
},
SpiffeID: []string{
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/mysql",
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/failover",
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc3/svc/failover",
},
Protocol: "tcp",
},
},
},
}, res.Nodes[0])
},
)
// reset so the next subtest is valid
backend.deleteConfigEntry(t, structs.ServiceResolver, "mysql")
// ensure the peer meta is restored
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
require.Equal(t, mysqlProxyCorrID, got.CorrelationID)
res := got.Result.(*pbservice.IndexedCheckServiceNodes)
require.Equal(t, uint64(0), res.Index)
require.Len(t, res.Nodes, 1)
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("mgw", "10.1.1.1", partition),
Service: &pbservice.NodeService{
Kind: "connect-proxy",
ID: "mysql-sidecar-proxy-instance-0",
Service: "mysql-sidecar-proxy",
Port: 8443,
Weights: &pbservice.Weights{
Passing: 1,
Warning: 1,
},
EnterpriseMeta: pbcommon.DefaultEnterpriseMeta,
Proxy: &pbservice.ConnectProxyConfig{
DestinationServiceID: "mysql-instance-0",
DestinationServiceName: "mysql",
},
Connect: &pbservice.ServiceConnect{
PeerMeta: &pbservice.PeeringServiceMeta{
SNI: []string{
"mysql.default.default.my-peering.external.11111111-2222-3333-4444-555555555555.consul",
},
SpiffeID: []string{
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/mysql",
"spiffe://11111111-2222-3333-4444-555555555555.consul/gateway/mesh/dc/dc1",
},
Protocol: "tcp",
},
@ -479,7 +445,9 @@ func TestSubscriptionManager_InitialSnapshot(t *testing.T) {
mgr := newSubscriptionManager(ctx, testutil.Logger(t), Config{
Datacenter: "dc1",
ConnectEnabled: true,
}, connect.TestTrustDomain, backend)
}, connect.TestTrustDomain, backend, func() StateStore {
return backend.store
})
subCh := mgr.subscribe(ctx, id, "my-peering", partition)
// Register two services that are not yet exported
@ -606,7 +574,9 @@ func TestSubscriptionManager_CARoots(t *testing.T) {
mgr := newSubscriptionManager(ctx, testutil.Logger(t), Config{
Datacenter: "dc1",
ConnectEnabled: true,
}, connect.TestTrustDomain, backend)
}, connect.TestTrustDomain, backend, func() StateStore {
return backend.store
})
subCh := mgr.subscribe(ctx, id, "my-peering", partition)
testutil.RunStep(t, "initial events contain trust bundle", func(t *testing.T) {
@ -682,10 +652,6 @@ func newTestSubscriptionBackend(t *testing.T) *testSubscriptionBackend {
return backend
}
func (b *testSubscriptionBackend) Store() Store {
return b.store
}
func (b *testSubscriptionBackend) ensurePeering(t *testing.T, name string) (uint64, string) {
b.lastIdx++
return b.lastIdx, setupTestPeering(t, b.store, name, b.lastIdx)

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"context"

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"context"

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"fmt"

View File

@ -1,4 +1,4 @@
package peering
package peerstream
import (
"context"

View File

@ -0,0 +1,128 @@
package peerstream
import (
"context"
"io"
"sync"
"time"
"google.golang.org/grpc/metadata"
"github.com/hashicorp/consul/proto/pbpeerstream"
)
type MockClient struct {
mu sync.Mutex
ErrCh chan error
ReplicationStream *MockStream
}
func (c *MockClient) Send(r *pbpeerstream.ReplicationMessage) error {
c.ReplicationStream.recvCh <- r
return nil
}
func (c *MockClient) Recv() (*pbpeerstream.ReplicationMessage, error) {
select {
case err := <-c.ErrCh:
return nil, err
case r := <-c.ReplicationStream.sendCh:
return r, nil
case <-time.After(10 * time.Millisecond):
return nil, io.EOF
}
}
func (c *MockClient) RecvWithTimeout(dur time.Duration) (*pbpeerstream.ReplicationMessage, error) {
select {
case err := <-c.ErrCh:
return nil, err
case r := <-c.ReplicationStream.sendCh:
return r, nil
case <-time.After(dur):
return nil, io.EOF
}
}
func (c *MockClient) Close() {
close(c.ReplicationStream.recvCh)
}
func NewMockClient(ctx context.Context) *MockClient {
return &MockClient{
ReplicationStream: newTestReplicationStream(ctx),
}
}
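
The mock's channels are deliberately crossed: what the client Sends arrives on the stream's Recv, and what the stream Sends arrives on the client's Recv. That lets a test drive `HandleStream` as if a remote peer were attached. A minimal in-package sketch:

```go
ctx := context.Background()
client := NewMockClient(ctx)

// Pretend to be the remote peer: this message surfaces on the server side.
_ = client.Send(&pbpeerstream.ReplicationMessage{})
msg, err := client.ReplicationStream.Recv() // received as if over the wire
_, _ = msg, err

// In a real test, client.ReplicationStream would be passed to HandleStream
// as the Stream field of a HandleStreamRequest.
```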
// MockStream mocks pbpeerstream.PeerStreamService_StreamResourcesServer
type MockStream struct {
sendCh chan *pbpeerstream.ReplicationMessage
recvCh chan *pbpeerstream.ReplicationMessage
ctx context.Context
mu sync.Mutex
}
var _ pbpeerstream.PeerStreamService_StreamResourcesServer = (*MockStream)(nil)
func newTestReplicationStream(ctx context.Context) *MockStream {
return &MockStream{
sendCh: make(chan *pbpeerstream.ReplicationMessage, 1),
recvCh: make(chan *pbpeerstream.ReplicationMessage, 1),
ctx: ctx,
}
}
// Send implements pbpeerstream.PeerStreamService_StreamResourcesServer
func (s *MockStream) Send(r *pbpeerstream.ReplicationMessage) error {
s.sendCh <- r
return nil
}
// Recv implements pbpeerstream.PeerStreamService_StreamResourcesServer
func (s *MockStream) Recv() (*pbpeerstream.ReplicationMessage, error) {
r := <-s.recvCh
if r == nil {
return nil, io.EOF
}
return r, nil
}
// Context implements grpc.ServerStream and grpc.ClientStream
func (s *MockStream) Context() context.Context {
return s.ctx
}
// SendMsg implements grpc.ServerStream and grpc.ClientStream
func (s *MockStream) SendMsg(m interface{}) error {
return nil
}
// RecvMsg implements grpc.ServerStream and grpc.ClientStream
func (s *MockStream) RecvMsg(m interface{}) error {
return nil
}
// SetHeader implements grpc.ServerStream
func (s *MockStream) SetHeader(metadata.MD) error {
return nil
}
// SendHeader implements grpc.ServerStream
func (s *MockStream) SendHeader(metadata.MD) error {
return nil
}
// SetTrailer implements grpc.ServerStream
func (s *MockStream) SetTrailer(metadata.MD) {}
type incrementalTime struct {
base time.Time
next uint64
}
func (t *incrementalTime) Now() time.Time {
t.next++
return t.base.Add(time.Duration(t.next) * time.Second)
}
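
This shim gives tests a deterministic clock: every call advances exactly one second past the base. Paired with the tracker's clock hook, timestamps become assertable. A sketch:

```go
it := &incrementalTime{base: time.Date(2022, 7, 14, 0, 0, 0, 0, time.UTC)}
tracker := NewTracker()
tracker.SetClock(it.Now)

status, _ := tracker.Connected("peer-id")
status.TrackAck() // LastAck == base + 1s; the next stamped call gets base + 2s
```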

View File

@ -12,7 +12,7 @@ import (
"github.com/hashicorp/consul/agent/consul/autopilotevents"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/proto-public/pbserverdiscovery"
)

View File

@ -4,15 +4,16 @@ import (
"context"
"errors"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/autopilotevents"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc/public"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbserverdiscovery"
"github.com/hashicorp/go-hclog"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/autopilotevents"
"github.com/hashicorp/consul/agent/consul/stream"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbserverdiscovery"
)
// WatchServers provides a stream on which you can receive the list of servers
@ -20,12 +21,12 @@ import (
// current set of ready servers is sent immediately at the start of the
// stream, and new updates will be sent whenever the set of ready servers changes.
func (s *Server) WatchServers(req *pbserverdiscovery.WatchServersRequest, serverStream pbserverdiscovery.ServerDiscoveryService_WatchServersServer) error {
logger := s.Logger.Named("watch-servers").With("request_id", public.TraceID())
logger := s.Logger.Named("watch-servers").With("request_id", external.TraceID())
logger.Debug("starting stream")
defer logger.Trace("stream closed")
token := public.TokenFromContext(serverStream.Context())
token := external.TokenFromContext(serverStream.Context())
// Serve the ready servers from an EventPublisher subscription. If the subscription is
// closed due to an ACL change, we'll attempt to re-authorize and resume it to

View File

@ -16,8 +16,8 @@ import (
resolver "github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/consul/autopilotevents"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc/public"
"github.com/hashicorp/consul/agent/grpc/public/testutils"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/proto-public/pbserverdiscovery"
"github.com/hashicorp/consul/proto/prototest"
"github.com/hashicorp/consul/sdk/testutil"
@ -125,7 +125,7 @@ func TestWatchServers_StreamLifeCycle(t *testing.T) {
Return(testutils.TestAuthorizerServiceWriteAny(t), nil).Twice()
// add the token to the requests context
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
// setup the server
server := NewServer(Config{
@ -198,7 +198,7 @@ func TestWatchServers_ACLToken_PermissionDenied(t *testing.T) {
Return(testutils.TestAuthorizerDenyAll(t), nil).Once()
// add the token to the requests context
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
// setup the server
server := NewServer(Config{
@ -229,7 +229,7 @@ func TestWatchServers_ACLToken_Unauthenticated(t *testing.T) {
Return(resolver.Result{}, acl.ErrNotFound).Once()
// add the token to the requests context
ctx := public.ContextWithToken(context.Background(), testACLToken)
ctx := external.ContextWithToken(context.Background(), testACLToken)
// setup the server
server := NewServer(Config{

View File

@ -1,4 +1,4 @@
package public
package external
import (
"context"

View File

@ -1,4 +1,4 @@
package public
package external
import "github.com/hashicorp/go-uuid"

View File

@ -1,4 +1,4 @@
package private
package internal
import (
"context"

View File

@ -1,4 +1,4 @@
package private
package internal
import (
"context"
@ -14,8 +14,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/grpc/private/internal/testservice"
"github.com/hashicorp/consul/agent/grpc/private/resolver"
"github.com/hashicorp/consul/agent/grpc-internal/internal/testservice"
"github.com/hashicorp/consul/agent/grpc-internal/resolver"
"github.com/hashicorp/consul/agent/metadata"
"github.com/hashicorp/consul/ipaddr"
"github.com/hashicorp/consul/sdk/freeport"
@ -145,9 +145,9 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler(t *testing.T) {
tlsConf, err := tlsutil.NewConfigurator(tlsutil.Config{
InternalRPC: tlsutil.ProtocolConfig{
VerifyIncoming: true,
CAFile: "../../../test/hostname/CertAuth.crt",
CertFile: "../../../test/hostname/Alice.crt",
KeyFile: "../../../test/hostname/Alice.key",
CAFile: "../../test/hostname/CertAuth.crt",
CertFile: "../../test/hostname/Alice.crt",
KeyFile: "../../test/hostname/Alice.key",
VerifyOutgoing: true,
},
}, hclog.New(nil))
@ -192,9 +192,9 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler_viaMeshGateway(t *testing.T)
tlsConf, err := tlsutil.NewConfigurator(tlsutil.Config{
InternalRPC: tlsutil.ProtocolConfig{
VerifyIncoming: true,
CAFile: "../../../test/hostname/CertAuth.crt",
CertFile: "../../../test/hostname/Bob.crt",
KeyFile: "../../../test/hostname/Bob.key",
CAFile: "../../test/hostname/CertAuth.crt",
CertFile: "../../test/hostname/Bob.crt",
KeyFile: "../../test/hostname/Bob.key",
VerifyOutgoing: true,
VerifyServerHostname: true,
},
@ -222,9 +222,9 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler_viaMeshGateway(t *testing.T)
clientTLSConf, err := tlsutil.NewConfigurator(tlsutil.Config{
InternalRPC: tlsutil.ProtocolConfig{
VerifyIncoming: true,
CAFile: "../../../test/hostname/CertAuth.crt",
CertFile: "../../../test/hostname/Betty.crt",
KeyFile: "../../../test/hostname/Betty.key",
CAFile: "../../test/hostname/CertAuth.crt",
CertFile: "../../test/hostname/Betty.crt",
KeyFile: "../../test/hostname/Betty.key",
VerifyOutgoing: true,
VerifyServerHostname: true,
},

View File

@ -1,11 +1,11 @@
package private
package internal
import (
"fmt"
"net"
"time"
agentmiddleware "github.com/hashicorp/consul/agent/grpc/middleware"
agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware"
middleware "github.com/grpc-ecosystem/go-grpc-middleware"
recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"

View File

@ -1,4 +1,4 @@
package private
package internal
import (
"bytes"
@ -13,8 +13,8 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/hashicorp/consul/agent/grpc/private/internal/testservice"
"github.com/hashicorp/consul/agent/grpc/private/resolver"
"github.com/hashicorp/consul/agent/grpc-internal/internal/testservice"
"github.com/hashicorp/consul/agent/grpc-internal/resolver"
)
func TestHandler_PanicRecoveryInterceptor(t *testing.T) {
@ -57,5 +57,5 @@ func TestHandler_PanicRecoveryInterceptor(t *testing.T) {
// Checking the entire stack trace is not possible, let's
// make sure that it contains a couple of expected strings.
require.Contains(t, strLog, `[ERROR] panic serving grpc request: panic="panic from Something`)
require.Contains(t, strLog, `github.com/hashicorp/consul/agent/grpc/private.(*simplePanic).Something`)
require.Contains(t, strLog, `github.com/hashicorp/consul/agent/grpc-internal.(*simplePanic).Something`)
}

View File

@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-binary. DO NOT EDIT.
// source: agent/grpc/private/internal/testservice/simple.proto
// source: agent/grpc-internal/internal/testservice/simple.proto
package testservice

View File

@ -2,7 +2,7 @@
// versions:
// protoc-gen-go v1.23.0
// protoc v3.15.8
// source: agent/grpc/private/internal/testservice/simple.proto
// source: agent/grpc-internal/internal/testservice/simple.proto
package testservice
@ -376,5 +376,5 @@ var _Simple_serviceDesc = grpc.ServiceDesc{
ServerStreams: true,
},
},
Metadata: "agent/grpc/private/internal/testservice/simple.proto",
Metadata: "agent/grpc-internal/internal/testservice/simple.proto",
}

View File

@ -2,7 +2,7 @@ syntax = "proto3";
package testservice;
option go_package = "github.com/hashicorp/consul/agent/grpc/private/internal/testservice";
option go_package = "github.com/hashicorp/consul/agent/grpc-internal/internal/testservice";
// Simple service is used to test gRPC plumbing.
service Simple {

View File

@ -1,4 +1,4 @@
package private
package internal
import (
"context"
@ -15,7 +15,7 @@ import (
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"github.com/hashicorp/consul/agent/grpc/private/internal/testservice"
"github.com/hashicorp/consul/agent/grpc-internal/internal/testservice"
"github.com/hashicorp/consul/agent/metadata"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/tlsutil"

View File

@ -21,7 +21,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
grpc "github.com/hashicorp/consul/agent/grpc/private"
grpc "github.com/hashicorp/consul/agent/grpc-internal"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbcommon"

View File

@ -1,4 +1,4 @@
package private
package internal
import (
"context"

View File

@ -1,4 +1,4 @@
package private
package internal
import (
"context"
@ -14,9 +14,10 @@ import (
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"github.com/hashicorp/consul/agent/grpc/private/internal/testservice"
"github.com/hashicorp/consul/proto/prototest"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/agent/grpc-internal/internal/testservice"
"github.com/hashicorp/consul/proto/prototest"
)
func noopRegister(*grpc.Server) {}

View File

@ -23,25 +23,26 @@ func (k *Key) Equal(x *Key) bool {
// Server is used to return details of a consul server
type Server struct {
Name string // <node>.<dc>
ShortName string // <node>
ID string
Datacenter string
Segment string
Port int
SegmentAddrs map[string]string
SegmentPorts map[string]int
WanJoinPort int
LanJoinPort int
Bootstrap bool
Expect int
Build version.Version
Version int
RaftVersion int
Addr net.Addr
Status serf.MemberStatus
ReadReplica bool
FeatureFlags map[string]int
Name string // <node>.<dc>
ShortName string // <node>
ID string
Datacenter string
Segment string
Port int
SegmentAddrs map[string]string
SegmentPorts map[string]int
WanJoinPort int
LanJoinPort int
ExternalGRPCPort int
Bootstrap bool
Expect int
Build version.Version
Version int
RaftVersion int
Addr net.Addr
Status serf.MemberStatus
ReadReplica bool
FeatureFlags map[string]int
// If true, use TLS when connecting to this server
UseTLS bool
@ -136,6 +137,18 @@ func IsConsulServer(m serf.Member) (bool, *Server) {
}
}
externalGRPCPort := 0
externalGRPCPortStr, ok := m.Tags["grpc_port"]
if ok {
externalGRPCPort, err = strconv.Atoi(externalGRPCPortStr)
if err != nil {
return false, nil
}
if externalGRPCPort < 1 {
return false, nil
}
}
vsnStr := m.Tags["vsn"]
vsn, err := strconv.Atoi(vsnStr)
if err != nil {
@ -160,24 +173,25 @@ func IsConsulServer(m serf.Member) (bool, *Server) {
addr := &net.TCPAddr{IP: m.Addr, Port: port}
parts := &Server{
Name: m.Name,
ShortName: strings.TrimSuffix(m.Name, "."+datacenter),
ID: m.Tags["id"],
Datacenter: datacenter,
Segment: segment,
Port: port,
SegmentAddrs: segmentAddrs,
SegmentPorts: segmentPorts,
WanJoinPort: wanJoinPort,
LanJoinPort: int(m.Port),
Bootstrap: bootstrap,
Expect: expect,
Addr: addr,
Build: *buildVersion,
Version: vsn,
RaftVersion: raftVsn,
Status: m.Status,
UseTLS: useTLS,
Name: m.Name,
ShortName: strings.TrimSuffix(m.Name, "."+datacenter),
ID: m.Tags["id"],
Datacenter: datacenter,
Segment: segment,
Port: port,
SegmentAddrs: segmentAddrs,
SegmentPorts: segmentPorts,
WanJoinPort: wanJoinPort,
LanJoinPort: int(m.Port),
ExternalGRPCPort: externalGRPCPort,
Bootstrap: bootstrap,
Expect: expect,
Addr: addr,
Build: *buildVersion,
Version: vsn,
RaftVersion: raftVsn,
Status: m.Status,
UseTLS: useTLS,
// DEPRECATED - remove nonVoter check once support for that tag is removed
ReadReplica: nonVoter || readReplica,
FeatureFlags: featureFlags,
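
The `grpc_port` handling added in this hunk follows the same optional-tag convention as `wan_join_port`: a missing tag defaults to zero, but a tag that is present must parse to a positive integer or the member is rejected as a server. A standalone sketch of that rule, with a hypothetical helper name:

```go
package example

import "strconv"

// parseOptionalPort mirrors the grpc_port checks above: an absent tag
// is acceptable and defaults to zero, but a present value must be a
// positive integer; anything else marks the member as invalid.
func parseOptionalPort(tags map[string]string, name string) (port int, ok bool) {
	raw, present := tags[name]
	if !present {
		return 0, true
	}
	port, err := strconv.Atoi(raw)
	if err != nil || port < 1 {
		return 0, false
	}
	return port, true
}
```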

View File

@ -4,6 +4,7 @@ import (
"net"
"testing"
"github.com/hashicorp/go-version"
"github.com/hashicorp/serf/serf"
"github.com/stretchr/testify/require"
@ -53,173 +54,136 @@ func TestServer_Key_params(t *testing.T) {
}
func TestIsConsulServer(t *testing.T) {
m := serf.Member{
Name: "foo",
Addr: net.IP([]byte{127, 0, 0, 1}),
Tags: map[string]string{
"role": "consul",
"id": "asdf",
"dc": "east-aws",
"port": "10000",
"build": "0.8.0",
"wan_join_port": "1234",
"vsn": "1",
"expect": "3",
"raft_vsn": "3",
"use_tls": "1",
"read_replica": "1",
},
Status: serf.StatusLeft,
}
ok, parts := metadata.IsConsulServer(m)
if !ok || parts.Datacenter != "east-aws" || parts.Port != 10000 {
t.Fatalf("bad: %v %v", ok, parts)
}
if parts.Name != "foo" {
t.Fatalf("bad: %v", parts)
}
if parts.ID != "asdf" {
t.Fatalf("bad: %v", parts.ID)
}
if parts.Bootstrap {
t.Fatalf("unexpected bootstrap")
}
if parts.Expect != 3 {
t.Fatalf("bad: %v", parts.Expect)
}
if parts.Port != 10000 {
t.Fatalf("bad: %v", parts.Port)
}
if parts.WanJoinPort != 1234 {
t.Fatalf("bad: %v", parts.WanJoinPort)
}
if parts.RaftVersion != 3 {
t.Fatalf("bad: %v", parts.RaftVersion)
}
if parts.Status != serf.StatusLeft {
t.Fatalf("bad: %v", parts.Status)
}
if !parts.UseTLS {
t.Fatalf("bad: %v", parts.UseTLS)
}
if !parts.ReadReplica {
t.Fatalf("unexpected voter")
}
m.Tags["bootstrap"] = "1"
m.Tags["disabled"] = "1"
ok, parts = metadata.IsConsulServer(m)
if !ok {
t.Fatalf("expected a valid consul server")
}
if !parts.Bootstrap {
t.Fatalf("expected bootstrap")
}
if parts.Addr.String() != "127.0.0.1:10000" {
t.Fatalf("bad addr: %v", parts.Addr)
}
if parts.Version != 1 {
t.Fatalf("bad: %v", parts)
}
m.Tags["expect"] = "3"
delete(m.Tags, "bootstrap")
delete(m.Tags, "disabled")
ok, parts = metadata.IsConsulServer(m)
if !ok || parts.Expect != 3 {
t.Fatalf("bad: %v", parts.Expect)
}
if parts.Bootstrap {
t.Fatalf("unexpected bootstrap")
mustVersion := func(s string) *version.Version {
v, err := version.NewVersion(s)
require.NoError(t, err)
return v
}
delete(m.Tags, "read_replica")
ok, parts = metadata.IsConsulServer(m)
if !ok || parts.ReadReplica {
t.Fatalf("unexpected read replica")
}
newCase := func(variant string) (in serf.Member, expect *metadata.Server) {
m := serf.Member{
Name: "foo",
Addr: net.IP([]byte{127, 0, 0, 1}),
Port: 5454,
Tags: map[string]string{
"role": "consul",
"id": "asdf",
"dc": "east-aws",
"port": "10000",
"build": "0.8.0",
"wan_join_port": "1234",
"grpc_port": "9876",
"vsn": "1",
"expect": "3",
"raft_vsn": "3",
"use_tls": "1",
},
Status: serf.StatusLeft,
}
m.Tags["nonvoter"] = "1"
ok, parts = metadata.IsConsulServer(m)
if !ok || !parts.ReadReplica {
t.Fatalf("expected read replica")
}
expected := &metadata.Server{
Name: "foo",
ShortName: "foo",
ID: "asdf",
Datacenter: "east-aws",
Segment: "",
Port: 10000,
SegmentAddrs: map[string]string{},
SegmentPorts: map[string]int{},
WanJoinPort: 1234,
LanJoinPort: 5454,
ExternalGRPCPort: 9876,
Bootstrap: false,
Expect: 3,
Addr: &net.TCPAddr{
IP: net.IP([]byte{127, 0, 0, 1}),
Port: 10000,
},
Build: *mustVersion("0.8.0"),
Version: 1,
RaftVersion: 3,
Status: serf.StatusLeft,
UseTLS: true,
ReadReplica: false,
FeatureFlags: map[string]int{},
}
delete(m.Tags, "role")
ok, _ = metadata.IsConsulServer(m)
require.False(t, ok, "expected to not be a consul server")
}
func TestIsConsulServer_Optional(t *testing.T) {
m := serf.Member{
Name: "foo",
Addr: net.IP([]byte{127, 0, 0, 1}),
Tags: map[string]string{
"role": "consul",
"id": "asdf",
"dc": "east-aws",
"port": "10000",
"vsn": "1",
"build": "0.8.0",
// wan_join_port, raft_vsn, and expect are optional and
switch variant {
case "normal":
case "read-replica":
m.Tags["read_replica"] = "1"
expected.ReadReplica = true
case "non-voter":
m.Tags["nonvoter"] = "1"
expected.ReadReplica = true
case "expect-3":
m.Tags["expect"] = "3"
expected.Expect = 3
case "bootstrapped":
m.Tags["bootstrap"] = "1"
m.Tags["disabled"] = "1"
expected.Bootstrap = true
case "optionals":
// grpc_port, wan_join_port, raft_vsn, and expect are optional and
// should default to zero.
},
}
ok, parts := metadata.IsConsulServer(m)
if !ok || parts.Datacenter != "east-aws" || parts.Port != 10000 {
t.Fatalf("bad: %v %v", ok, parts)
}
if parts.Name != "foo" {
t.Fatalf("bad: %v", parts)
}
if parts.ID != "asdf" {
t.Fatalf("bad: %v", parts.ID)
}
if parts.Bootstrap {
t.Fatalf("unexpected bootstrap")
}
if parts.Expect != 0 {
t.Fatalf("bad: %v", parts.Expect)
}
if parts.Port != 10000 {
t.Fatalf("bad: %v", parts.Port)
}
if parts.WanJoinPort != 0 {
t.Fatalf("bad: %v", parts.WanJoinPort)
}
if parts.RaftVersion != 0 {
t.Fatalf("bad: %v", parts.RaftVersion)
delete(m.Tags, "grpc_port")
delete(m.Tags, "wan_join_port")
delete(m.Tags, "raft_vsn")
delete(m.Tags, "expect")
expected.RaftVersion = 0
expected.Expect = 0
expected.WanJoinPort = 0
expected.ExternalGRPCPort = 0
case "feature-namespaces":
m.Tags["ft_ns"] = "1"
expected.FeatureFlags = map[string]int{"ns": 1}
//
case "bad-grpc-port":
m.Tags["grpc_port"] = "three"
case "negative-grpc-port":
m.Tags["grpc_port"] = "-1"
case "zero-grpc-port":
m.Tags["grpc_port"] = "0"
case "no-role":
delete(m.Tags, "role")
default:
t.Fatalf("unhandled variant: %s", variant)
}
return m, expected
}
m.Tags["bootstrap"] = "1"
m.Tags["disabled"] = "1"
m.Tags["ft_ns"] = "1"
ok, parts = metadata.IsConsulServer(m)
if !ok {
t.Fatalf("expected a valid consul server")
}
if !parts.Bootstrap {
t.Fatalf("expected bootstrap")
}
if parts.Addr.String() != "127.0.0.1:10000" {
t.Fatalf("bad addr: %v", parts.Addr)
}
if parts.Version != 1 {
t.Fatalf("bad: %v", parts)
}
expectedFlags := map[string]int{"ns": 1}
require.Equal(t, expectedFlags, parts.FeatureFlags)
run := func(t *testing.T, variant string, expectOK bool) {
m, expected := newCase(variant)
ok, parts := metadata.IsConsulServer(m)
m.Tags["expect"] = "3"
delete(m.Tags, "bootstrap")
delete(m.Tags, "disabled")
ok, parts = metadata.IsConsulServer(m)
if !ok || parts.Expect != 3 {
t.Fatalf("bad: %v", parts.Expect)
}
if parts.Bootstrap {
t.Fatalf("unexpected bootstrap")
if expectOK {
require.True(t, ok, "expected a valid consul server")
require.Equal(t, expected, parts)
} else {
ok, _ := metadata.IsConsulServer(m)
require.False(t, ok, "expected to not be a consul server")
}
}
delete(m.Tags, "role")
ok, _ = metadata.IsConsulServer(m)
require.False(t, ok, "expected to not be a consul server")
cases := map[string]bool{
"normal": true,
"read-replica": true,
"non-voter": true,
"expect-3": true,
"bootstrapped": true,
"optionals": true,
"feature-namespaces": true,
//
"no-role": false,
"bad-grpc-port": false,
"negative-grpc-port": false,
"zero-grpc-port": false,
}
for variant, expectOK := range cases {
t.Run(variant, func(t *testing.T) {
run(t, variant, expectOK)
})
}
}
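
The test rewrite above collapses a long chain of `t.Fatalf` checks into named variants driven through a single `run` helper. The same table-driven shape in miniature, using a self-contained validity rule rather than the real `metadata.IsConsulServer`:

```go
package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestGRPCPortVariants shows the table-driven pattern the diff adopts:
// every variant states its input and expected outcome once, and a
// shared runner performs the assertions.
func TestGRPCPortVariants(t *testing.T) {
	// validPort restates the grpc_port rule from the server metadata
	// change: an absent tag is fine, a present one must be a positive
	// integer.
	validPort := func(tags map[string]string) bool {
		raw, ok := tags["grpc_port"]
		if !ok {
			return true
		}
		p, err := strconv.Atoi(raw)
		return err == nil && p >= 1
	}

	cases := map[string]struct {
		tags map[string]string
		ok   bool
	}{
		"normal":             {map[string]string{"grpc_port": "9876"}, true},
		"optionals":          {map[string]string{}, true},
		"bad-grpc-port":      {map[string]string{"grpc_port": "three"}, false},
		"negative-grpc-port": {map[string]string{"grpc_port": "-1"}, false},
		"zero-grpc-port":     {map[string]string{"grpc_port": "0"}, false},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			require.Equal(t, tc.ok, validPort(tc.tags))
		})
	}
}
```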

View File

@ -107,7 +107,7 @@ func TestHTTP_Peering_GenerateToken(t *testing.T) {
require.NoError(t, json.Unmarshal(tokenJSON, &token))
require.Nil(t, token.CA)
require.Equal(t, []string{fmt.Sprintf("127.0.0.1:%d", a.config.ServerPort)}, token.ServerAddresses)
require.Equal(t, []string{fmt.Sprintf("127.0.0.1:%d", a.config.GRPCPort)}, token.ServerAddresses)
require.Equal(t, "server.dc1.consul", token.ServerName)
// The PeerID in the token is randomly generated so we don't assert on its value.
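
The assertion swap above reflects that peering tokens now advertise the agent's public gRPC port instead of the internal RPC server port. A trimmed-down illustration with hypothetical type and field names (the real token carries more fields):

```go
package example

import "fmt"

// peeringToken is a stand-in for the real token type; only the fields
// exercised by the test above are shown.
type peeringToken struct {
	ServerAddresses []string
	ServerName      string
}

// newPeeringToken builds the advertised address list from the public
// gRPC port (before this commit, the RPC ServerPort was used here).
func newPeeringToken(bindIP string, grpcPort int, serverName string) peeringToken {
	return peeringToken{
		ServerAddresses: []string{fmt.Sprintf("%s:%d", bindIP, grpcPort)},
		ServerName:      serverName,
	}
}
```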

Some files were not shown because too many files have changed in this diff