[NET6429] Add listeners for mesh-gateway v2 (#20253)

Add listeners for mesh-gateway v2
John Maguire 2024-01-18 12:52:06 -05:00 committed by GitHub
parent 15ab80c832
commit 7888d00e49
3 changed files with 594 additions and 12 deletions

View File

@@ -4,13 +4,21 @@
package builder
import (
"context"
"fmt"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/envoyextensions/xdscommon"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/gatewayproxy/fetcher"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
meshv2beta1 "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbmesh/v2beta1/pbproxystate"
pbmulticluster "github.com/hashicorp/consul/proto-public/pbmulticluster/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
@@ -43,13 +51,148 @@ func (b *proxyStateTemplateBuilder) identity() *pbresource.Reference {
}
func (b *proxyStateTemplateBuilder) listeners() []*pbproxystate.Listener {
-// TODO NET-6429
-return nil
var listeners []*pbproxystate.Listener
var address *pbcatalog.WorkloadAddress
// TODO: NET-7260 we think there should only ever be a single address for a gateway,
// need to validate this
if len(b.workload.Data.Addresses) > 0 {
address = b.workload.Data.Addresses[0]
}
// if the address defines no ports we assume the intention is to bind to all
// ports on the workload
if len(address.Ports) == 0 {
for _, workloadPort := range b.workload.Data.Ports {
listeners = append(listeners, b.buildListener(address, workloadPort.Port))
}
return listeners
}
for _, portName := range address.Ports {
workloadPort, ok := b.workload.Data.Ports[portName]
if !ok {
b.logger.Trace("port does not exist for workload", "port name", portName)
continue
}
listeners = append(listeners, b.buildListener(address, workloadPort.Port))
}
return listeners
}
func (b *proxyStateTemplateBuilder) buildListener(address *pbcatalog.WorkloadAddress, port uint32) *pbproxystate.Listener {
return &pbproxystate.Listener{
Name: xdscommon.PublicListenerName,
Direction: pbproxystate.Direction_DIRECTION_INBOUND,
BindAddress: &pbproxystate.Listener_HostPort{
HostPort: &pbproxystate.HostPortAddress{
Host: address.Host,
Port: port,
},
},
Capabilities: []pbproxystate.Capability{
pbproxystate.Capability_CAPABILITY_L4_TLS_INSPECTION,
},
DefaultRouter: &pbproxystate.Router{
Destination: &pbproxystate.Router_L4{
L4: &pbproxystate.L4Destination{
Destination: &pbproxystate.L4Destination_Cluster{
Cluster: &pbproxystate.DestinationCluster{
Name: "",
},
},
StatPrefix: "prefix",
},
},
},
Routers: b.routers(),
}
}
// routers loops through the ports and consumers for each exported service and generates
// a pbproxystate.Router matching the SNI to the target cluster. The target port name
// will be included in the ALPN. The targeted cluster will marry this port name with the SNI.
func (b *proxyStateTemplateBuilder) routers() []*pbproxystate.Router {
var routers []*pbproxystate.Router
if b.exportedServices == nil {
return routers
}
for _, exportedService := range b.exportedServices.Data.Services {
serviceID := resource.IDFromReference(exportedService.TargetRef)
service, err := b.dataFetcher.FetchService(context.Background(), serviceID)
if err != nil {
b.logger.Trace("error reading exported service", "error", err)
continue
} else if service == nil {
b.logger.Trace("service does not exist, skipping router", "service", serviceID)
continue
}
for _, port := range service.Data.Ports {
for _, consumer := range exportedService.Consumers {
routers = append(routers, &pbproxystate.Router{
Match: &pbproxystate.Match{
AlpnProtocols: []string{alpnProtocol(port.TargetPort)},
ServerNames: []string{b.sni(exportedService.TargetRef, consumer)},
},
Destination: &pbproxystate.Router_L4{
L4: &pbproxystate.L4Destination{
Destination: &pbproxystate.L4Destination_Cluster{
Cluster: &pbproxystate.DestinationCluster{
Name: b.clusterName(exportedService.TargetRef, consumer, port.TargetPort),
},
},
StatPrefix: "prefix",
},
},
})
}
}
}
return routers
}
func (b *proxyStateTemplateBuilder) clusters() map[string]*pbproxystate.Cluster {
-// TODO NET-6430
-return nil
clusters := map[string]*pbproxystate.Cluster{}
if b.exportedServices == nil {
return clusters
}
for _, exportedService := range b.exportedServices.Data.Services {
serviceID := resource.IDFromReference(exportedService.TargetRef)
service, err := b.dataFetcher.FetchService(context.Background(), serviceID)
if err != nil {
b.logger.Trace("error reading exported service", "error", err)
continue
} else if service == nil {
b.logger.Trace("service does not exist, skipping router", "service", serviceID)
continue
}
for _, port := range service.Data.Ports {
for _, consumer := range exportedService.Consumers {
clusterName := b.clusterName(exportedService.TargetRef, consumer, port.TargetPort)
clusters[clusterName] = &pbproxystate.Cluster{
Name: clusterName,
Protocol: pbproxystate.Protocol_PROTOCOL_TCP, // TODO
Group: &pbproxystate.Cluster_EndpointGroup{
EndpointGroup: &pbproxystate.EndpointGroup{
Group: &pbproxystate.EndpointGroup_Dynamic{},
},
},
AltStatName: "prefix",
}
}
}
}
return clusters
}
func (b *proxyStateTemplateBuilder) endpoints() map[string]*pbproxystate.Endpoints {
@@ -67,12 +210,65 @@ func (b *proxyStateTemplateBuilder) Build() *meshv2beta1.ProxyStateTemplate {
ProxyState: &meshv2beta1.ProxyState{
Identity: b.identity(),
Listeners: b.listeners(),
-Clusters: b.clusters(),
Endpoints: b.endpoints(),
Clusters: b.clusters(),
Routes: b.routes(),
},
-RequiredEndpoints: make(map[string]*pbproxystate.EndpointRef),
RequiredEndpoints: b.requiredEndpoints(),
RequiredLeafCertificates: make(map[string]*pbproxystate.LeafCertificateRef),
RequiredTrustBundles: make(map[string]*pbproxystate.TrustBundleRef),
}
}
// requiredEndpoints loops through the consumers for each exported service
// and adds a pbproxystate.EndpointRef to be hydrated for each cluster.
func (b *proxyStateTemplateBuilder) requiredEndpoints() map[string]*pbproxystate.EndpointRef {
requiredEndpoints := make(map[string]*pbproxystate.EndpointRef)
if b.exportedServices == nil {
return requiredEndpoints
}
for _, exportedService := range b.exportedServices.Data.Services {
serviceID := resource.IDFromReference(exportedService.TargetRef)
service, err := b.dataFetcher.FetchService(context.Background(), serviceID)
if err != nil {
b.logger.Trace("error reading exported service", "error", err)
continue
} else if service == nil {
b.logger.Trace("service does not exist, skipping router", "service", serviceID)
continue
}
for _, port := range service.Data.Ports {
for _, consumer := range exportedService.Consumers {
clusterName := b.clusterName(exportedService.TargetRef, consumer, port.TargetPort)
requiredEndpoints[clusterName] = &pbproxystate.EndpointRef{
Id: resource.ReplaceType(pbcatalog.ServiceEndpointsType, serviceID),
Port: port.TargetPort,
}
}
}
}
return requiredEndpoints
}
func (b *proxyStateTemplateBuilder) clusterName(serviceRef *pbresource.Reference, consumer *pbmulticluster.ComputedExportedServiceConsumer, port string) string {
return fmt.Sprintf("%s.%s", port, b.sni(serviceRef, consumer))
}
func (b *proxyStateTemplateBuilder) sni(serviceRef *pbresource.Reference, consumer *pbmulticluster.ComputedExportedServiceConsumer) string {
switch tConsumer := consumer.Tenancy.(type) {
case *pbmulticluster.ComputedExportedServiceConsumer_Partition:
return connect.ServiceSNI(serviceRef.Name, "", serviceRef.Tenancy.Namespace, tConsumer.Partition, b.dc, b.trustDomain)
case *pbmulticluster.ComputedExportedServiceConsumer_Peer:
return connect.PeeredServiceSNI(serviceRef.Name, serviceRef.Tenancy.Namespace, serviceRef.Tenancy.Partition, tConsumer.Peer, b.trustDomain)
default:
return ""
}
}
func alpnProtocol(portName string) string {
return fmt.Sprintf("consul~%s", portName)
}
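
To illustrate the naming scheme used by routers(), clusterName(), sni(), and alpnProtocol() above, here is a minimal standalone sketch. It is not part of the commit: the service name "api-1", peer "peer-1", tenancy values, and trust domain are made up, and a partition consumer would use connect.ServiceSNI instead of connect.PeeredServiceSNI.

package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/connect"
)

func main() {
	// Hypothetical exported service consumed by a peer.
	const (
		service     = "api-1"
		namespace   = "default"
		partition   = "default"
		peer        = "peer-1"
		trustDomain = "trustDomain"
	)
	// Peer consumers are matched on the peered SNI; a partition consumer would
	// use connect.ServiceSNI(service, "", namespace, partition, dc, trustDomain).
	sni := connect.PeeredServiceSNI(service, namespace, partition, peer, trustDomain)
	for _, port := range []string{"tcp", "mesh"} {
		alpn := fmt.Sprintf("consul~%s", port)     // ALPN carries the target port name
		cluster := fmt.Sprintf("%s.%s", port, sni) // cluster name marries the port with the SNI
		fmt.Printf("ALPN %q + SNI %q -> cluster %q\n", alpn, sni, cluster)
	}
}

Running this requires the consul module on the import path; the point is only that the router match and its destination cluster agree on the same "<port>.<sni>" string, which is also what the test expectations below assert.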

View File

@@ -0,0 +1,361 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package builder
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/hashicorp/consul/agent/connect"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/envoyextensions/xdscommon"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/gatewayproxy/fetcher"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/multicluster"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbmesh/v2beta1/pbproxystate"
pbmulticluster "github.com/hashicorp/consul/proto-public/pbmulticluster/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/version/versiontest"
)
type proxyStateTemplateBuilderSuite struct {
suite.Suite
ctx context.Context
client pbresource.ResourceServiceClient
resourceClient *resourcetest.Client
rt controller.Runtime
workloadWithAddressPorts *types.DecodedWorkload
workloadWithOutAddressPorts *types.DecodedWorkload
apiService *pbresource.Resource
exportedServicesPartitionData *types.DecodedComputedExportedServices
exportedServicesPartitionResource *pbresource.Resource
exportedServicesPeerData *types.DecodedComputedExportedServices
exportedServicesPeerResource *pbresource.Resource
tenancies []*pbresource.Tenancy
}
func (suite *proxyStateTemplateBuilderSuite) SetupTest() {
suite.ctx = testutil.TestContext(suite.T())
suite.tenancies = resourcetest.TestTenancies()
suite.client = svctest.NewResourceServiceBuilder().
WithRegisterFns(types.Register, catalog.RegisterTypes, multicluster.RegisterTypes).
WithTenancies(suite.tenancies...).
Run(suite.T())
suite.resourceClient = resourcetest.NewClient(suite.client)
suite.rt = controller.Runtime{
Client: suite.client,
Logger: testutil.Logger(suite.T()),
}
}
func (suite *proxyStateTemplateBuilderSuite) setupWithTenancy(tenancy *pbresource.Tenancy) {
suite.workloadWithAddressPorts = &types.DecodedWorkload{
Data: &pbcatalog.Workload{
Identity: "test",
Addresses: []*pbcatalog.WorkloadAddress{
// we want to test that the first address is used
{
Host: "testhostname",
Ports: []string{"wan"},
External: false,
},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"wan": {
Port: 443,
Protocol: 0,
},
},
},
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Name: "test",
Tenancy: tenancy,
},
},
}
suite.workloadWithOutAddressPorts = &types.DecodedWorkload{
Data: &pbcatalog.Workload{
Identity: "test",
Addresses: []*pbcatalog.WorkloadAddress{
// we want to test that the first address is used
{
Host: "testhostname",
Ports: []string{},
External: false,
},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"wan": {
Port: 443,
Protocol: 0,
},
},
},
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Name: "test",
Tenancy: tenancy,
},
},
}
// write the service to export
suite.apiService = resourcetest.Resource(pbcatalog.ServiceType, "api-1").
WithTenancy(tenancy).
WithData(suite.T(), &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{TargetPort: "tcp", VirtualPort: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
{TargetPort: "mesh", VirtualPort: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
}).Write(suite.T(), suite.client)
consumers := []*pbmulticluster.ComputedExportedServiceConsumer{
{
Tenancy: &pbmulticluster.ComputedExportedServiceConsumer_Partition{Partition: tenancy.Partition},
},
}
if !versiontest.IsEnterprise() {
consumers = []*pbmulticluster.ComputedExportedServiceConsumer{}
}
suite.exportedServicesPartitionData = &types.DecodedComputedExportedServices{
Resource: &pbresource.Resource{},
Data: &pbmulticluster.ComputedExportedServices{
Services: []*pbmulticluster.ComputedExportedService{
{
TargetRef: &pbresource.Reference{
Type: pbcatalog.ServiceType,
Tenancy: tenancy,
Name: "api-1",
Section: "",
},
Consumers: consumers,
},
},
},
}
suite.exportedServicesPartitionResource = resourcetest.Resource(pbmulticluster.ComputedExportedServicesType, "global").
WithData(suite.T(), suite.exportedServicesPartitionData.Data).
Write(suite.T(), suite.client)
suite.exportedServicesPeerData = &types.DecodedComputedExportedServices{
Resource: &pbresource.Resource{},
Data: &pbmulticluster.ComputedExportedServices{
Services: []*pbmulticluster.ComputedExportedService{
{
TargetRef: &pbresource.Reference{
Type: pbcatalog.ServiceType,
Tenancy: tenancy,
Name: "api-1",
Section: "",
},
Consumers: []*pbmulticluster.ComputedExportedServiceConsumer{
{
Tenancy: &pbmulticluster.ComputedExportedServiceConsumer_Peer{Peer: "api-1"},
},
},
},
},
},
}
suite.exportedServicesPeerResource = resourcetest.Resource(pbmulticluster.ComputedExportedServicesType, "global").
WithData(suite.T(), suite.exportedServicesPartitionData.Data).
Write(suite.T(), suite.client)
}
func (suite *proxyStateTemplateBuilderSuite) TestProxyStateTemplateBuilder_BuildForPeeredExportedServices() {
suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) {
c := cache.New()
f := fetcher.New(suite.client, c)
dc := "dc"
trustDomain := "trustDomain"
logger := testutil.Logger(suite.T())
for name, workload := range map[string]*types.DecodedWorkload{
"with address ports": suite.workloadWithAddressPorts,
"without address ports": suite.workloadWithOutAddressPorts,
} {
testutil.RunStep(suite.T(), name, func(t *testing.T) {
builder := NewProxyStateTemplateBuilder(workload, suite.exportedServicesPeerData, logger, f, dc, trustDomain)
expectedProxyStateTemplate := &pbmesh.ProxyStateTemplate{
ProxyState: &pbmesh.ProxyState{
Identity: &pbresource.Reference{
Name: "test",
Tenancy: tenancy,
Type: pbauth.WorkloadIdentityType,
},
Listeners: []*pbproxystate.Listener{
{
Name: xdscommon.PublicListenerName,
Direction: pbproxystate.Direction_DIRECTION_INBOUND,
BindAddress: &pbproxystate.Listener_HostPort{
HostPort: &pbproxystate.HostPortAddress{
Host: "testhostname",
Port: 443,
},
},
Capabilities: []pbproxystate.Capability{
pbproxystate.Capability_CAPABILITY_L4_TLS_INSPECTION,
},
DefaultRouter: &pbproxystate.Router{
Destination: &pbproxystate.Router_L4{
L4: &pbproxystate.L4Destination{
Destination: &pbproxystate.L4Destination_Cluster{
Cluster: &pbproxystate.DestinationCluster{
Name: "",
},
},
StatPrefix: "prefix",
},
},
},
Routers: []*pbproxystate.Router{
{
Match: &pbproxystate.Match{
AlpnProtocols: []string{"consul~tcp"},
ServerNames: []string{connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")},
},
Destination: &pbproxystate.Router_L4{
L4: &pbproxystate.L4Destination{
Destination: &pbproxystate.L4Destination_Cluster{
Cluster: &pbproxystate.DestinationCluster{
Name: fmt.Sprintf("tcp.%s", connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")),
},
},
StatPrefix: "prefix",
},
},
},
{
Match: &pbproxystate.Match{
AlpnProtocols: []string{"consul~mesh"},
ServerNames: []string{connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")},
},
Destination: &pbproxystate.Router_L4{
L4: &pbproxystate.L4Destination{
Destination: &pbproxystate.L4Destination_Cluster{
Cluster: &pbproxystate.DestinationCluster{
Name: fmt.Sprintf("mesh.%s", connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")),
},
},
StatPrefix: "prefix",
},
},
},
},
},
},
Clusters: map[string]*pbproxystate.Cluster{
fmt.Sprintf("mesh.%s", connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")): {
Name: fmt.Sprintf("mesh.%s", connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")),
Group: &pbproxystate.Cluster_EndpointGroup{
EndpointGroup: &pbproxystate.EndpointGroup{
Group: &pbproxystate.EndpointGroup_Dynamic{},
},
},
AltStatName: "prefix",
Protocol: pbproxystate.Protocol_PROTOCOL_TCP, // TODO
},
fmt.Sprintf("tcp.%s", connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")): {
Name: fmt.Sprintf("tcp.%s", connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")),
Group: &pbproxystate.Cluster_EndpointGroup{
EndpointGroup: &pbproxystate.EndpointGroup{
Group: &pbproxystate.EndpointGroup_Dynamic{},
},
},
AltStatName: "prefix",
Protocol: pbproxystate.Protocol_PROTOCOL_TCP, // TODO
},
},
},
RequiredEndpoints: map[string]*pbproxystate.EndpointRef{
fmt.Sprintf("mesh.%s", connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")): {
Id: &pbresource.ID{
Name: "api-1",
Type: pbcatalog.ServiceEndpointsType,
Tenancy: tenancy,
},
Port: "mesh",
},
fmt.Sprintf("tcp.%s", connect.PeeredServiceSNI("api-1", tenancy.Namespace, tenancy.Partition, "api-1", "trustDomain")): {
Id: &pbresource.ID{
Name: "api-1",
Type: pbcatalog.ServiceEndpointsType,
Tenancy: tenancy,
},
Port: "tcp",
},
},
RequiredLeafCertificates: make(map[string]*pbproxystate.LeafCertificateRef),
RequiredTrustBundles: make(map[string]*pbproxystate.TrustBundleRef),
}
require.Equal(t, expectedProxyStateTemplate, builder.Build())
})
}
})
}
func version(partitionName, partitionOrPeer string) string {
if partitionOrPeer == "peer" {
return "external"
}
if partitionName == "default" {
return "internal"
}
return "internal-v1"
}
func withPartition(partition string) string {
if partition == "default" {
return ""
}
return "." + partition
}
func TestProxyStateTemplateBuilder(t *testing.T) {
suite.Run(t, new(proxyStateTemplateBuilderSuite))
}
func (suite *proxyStateTemplateBuilderSuite) appendTenancyInfo(tenancy *pbresource.Tenancy) string {
return fmt.Sprintf("%s_Namespace_%s_Partition", tenancy.Namespace, tenancy.Partition)
}
func (suite *proxyStateTemplateBuilderSuite) runTestCaseWithTenancies(t func(*pbresource.Tenancy)) {
for _, tenancy := range suite.tenancies {
suite.Run(suite.appendTenancyInfo(tenancy), func() {
suite.setupWithTenancy(tenancy)
suite.T().Cleanup(func() {
suite.cleanUpNodes()
})
t(tenancy)
})
}
}
func (suite *proxyStateTemplateBuilderSuite) cleanUpNodes() {
suite.resourceClient.MustDelete(suite.T(), suite.exportedServicesPartitionResource.Id)
suite.resourceClient.MustDelete(suite.T(), suite.exportedServicesPeerResource.Id)
suite.resourceClient.MustDelete(suite.T(), suite.apiService.Id)
}

View File

@@ -15,7 +15,6 @@ import (
"github.com/hashicorp/consul/internal/mesh/internal/controllers/gatewayproxy/fetcher"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
@@ -76,7 +75,28 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
// If the workload is not for a xGateway, let the sidecarproxy reconciler handle it
if gatewayKind := workload.Metadata["gateway-kind"]; gatewayKind == "" {
rt.Logger.Trace("workload is not a gateway; skipping reconciliation", "workload", workloadID)
rt.Logger.Trace("workload is not a gateway; skipping reconciliation", "workload", workloadID, "workloadData", workload.Data)
return nil
}
// TODO NET-7014 Determine what gateway controls this workload
// For now, we cheat by knowing the MeshGateway's name, type + tenancy ahead of time
gatewayID := &pbresource.ID{
Name: "mesh-gateway",
Type: pbmesh.MeshGatewayType,
Tenancy: resource.DefaultPartitionedTenancy(),
}
// Check if the gateway exists.
gateway, err := dataFetcher.FetchMeshGateway(ctx, gatewayID)
if err != nil {
rt.Logger.Error("error reading the associated gateway", "error", err)
return err
}
if gateway == nil {
// If gateway has been deleted, then return as ProxyStateTemplate should be
// cleaned up by the garbage collector because of the owner reference.
rt.Logger.Trace("gateway doesn't exist; skipping reconciliation", "gateway", gatewayID)
return nil
}
@@ -96,13 +116,18 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
Tenancy: &pbresource.Tenancy{
Partition: req.ID.Tenancy.Partition,
},
-Type: pbmulticluster.ExportedServicesType,
Type: pbmulticluster.ComputedExportedServicesType,
}
exportedServices, err := dataFetcher.FetchExportedServices(ctx, exportedServicesID)
-if err != nil {
-rt.Logger.Error("error reading the associated exported services", "error", err)
-exportedServices = &types.DecodedComputedExportedServices{}
if err != nil || exportedServices == nil {
if err != nil {
rt.Logger.Error("error reading the associated exported services", "error", err)
}
if exportedServices == nil {
rt.Logger.Error("exported services was nil")
}
}
trustDomain, err := r.getTrustDomain()
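
For reference on the exported-services lookup above (ExportedServicesType replaced by ComputedExportedServicesType), here is a small standalone sketch of the ID being fetched. It is not the committed code: the partition value is illustrative, and the well-known name "global" is taken from the test setup earlier in this change.

package main

import (
	"fmt"

	pbmulticluster "github.com/hashicorp/consul/proto-public/pbmulticluster/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

func main() {
	// Partition-scoped ComputedExportedServices ID of the kind the reconciler
	// fetches. "global" is the name the test setup writes; "default" stands in
	// for req.ID.Tenancy.Partition.
	id := &pbresource.ID{
		Name:    "global",
		Type:    pbmulticluster.ComputedExportedServicesType,
		Tenancy: &pbresource.Tenancy{Partition: "default"},
	}
	fmt.Printf("%s.%s/%s in partition %q\n", id.Type.Group, id.Type.Kind, id.Name, id.Tenancy.Partition)
}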