mirror of https://github.com/status-im/consul.git
sidecar-proxy controller: L4 controller with explicit upstreams (NET-3988) (#18352)
* This controller generates and saves ProxyStateTemplate resources for sidecar proxies.
* It currently supports single-port services and L4 ports only.
* It keeps a cache of all destinations to make it easier to compute and retrieve destinations.
* It will update the status of the pbmesh.Upstreams resource if anything is invalid.
* This commit also changes service endpoints to include workload identity. This made the implementation a bit easier, as we don't need to look up as many workloads and can instead rely on endpoints data.
This commit is contained in:
parent a17f4a0b89
commit 3c70e14713
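For orientation, the sketch below shows the approximate shape of the ProxyStateTemplate resource this controller writes for a sidecar. It is illustrative only, not code from this commit; it reuses the field layout seeded by the new builder package further down, and workloadIdentityRef and outboundListener are hypothetical placeholders.

// Sketch only: approximate shape of a generated ProxyStateTemplate (single-port, L4, explicit upstreams).
func sketchProxyStateTemplate(workloadIdentityRef *pbresource.Reference, outboundListener *pbproxystate.Listener) *pbmesh.ProxyStateTemplate {
    return &pbmesh.ProxyStateTemplate{
        ProxyState: &pbmesh.ProxyState{
            Identity:  workloadIdentityRef, // workload identity the proxy's leaf certificate is issued for
            Listeners: []*pbproxystate.Listener{outboundListener}, // one outbound listener per explicit upstream
            Clusters:  make(map[string]*pbproxystate.Cluster),
            Endpoints: make(map[string]*pbproxystate.Endpoints),
        },
        // References that are resolved later (e.g. by the xds controller).
        RequiredEndpoints:        make(map[string]*pbproxystate.EndpointRef),
        RequiredLeafCertificates: make(map[string]*pbproxystate.LeafCertificateRef),
        RequiredTrustBundles:     make(map[string]*pbproxystate.TrustBundleRef),
    }
}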
@@ -8,6 +8,7 @@ import (
    "net/url"

    "github.com/hashicorp/consul/acl"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

// SpiffeIDService is the structure to represent the SPIFFE ID for a service.

@@ -52,3 +53,34 @@ func (id SpiffeIDService) uriPath() string {
    }
    return path
}

// SpiffeIDWorkloadIdentity is the structure to represent the SPIFFE ID for a workload identity.
type SpiffeIDWorkloadIdentity struct {
    Host string
    Partition string
    Namespace string
    Identity string
}

func (id SpiffeIDWorkloadIdentity) URI() *url.URL {
    var result url.URL
    result.Scheme = "spiffe"
    result.Host = id.Host
    result.Path = fmt.Sprintf("/ap/%s/ns/%s/identity/%s",
        id.Partition,
        id.Namespace,
        id.Identity,
    )
    return &result
}

// SpiffeIDFromIdentityRef creates the SPIFFE ID from a workload identity.
// TODO (ishustava): make sure ref type is workload identity.
func SpiffeIDFromIdentityRef(trustDomain string, ref *pbresource.Reference) string {
    return SpiffeIDWorkloadIdentity{
        Host: trustDomain,
        Partition: ref.Tenancy.Partition,
        Namespace: ref.Tenancy.Namespace,
        Identity: ref.Name,
    }.URI().String()
}
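As a quick usage sketch (not part of this commit, and assuming it sits in the same agent/connect package as the type above, imports elided): the URI produced by the new workload identity type follows directly from the format string above. The trust domain value here is a made-up example of the usual "<cluster-id>.consul" form.

func ExampleSpiffeIDWorkloadIdentity() {
    id := SpiffeIDWorkloadIdentity{
        Host:      "11111111-2222-3333-4444-555555555555.consul",
        Partition: "default",
        Namespace: "default",
        Identity:  "api",
    }
    fmt.Println(id.URI().String())
    // Output: spiffe://11111111-2222-3333-4444-555555555555.consul/ap/default/ns/default/identity/api
}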
@@ -19,6 +19,9 @@ import (
    "sync/atomic"
    "time"

    "github.com/hashicorp/consul/internal/mesh"
    "github.com/hashicorp/consul/internal/resource"

    "github.com/armon/go-metrics"
    "github.com/hashicorp/consul-net-rpc/net/rpc"
    "github.com/hashicorp/go-connlimit"

@@ -72,9 +75,7 @@ import (
    "github.com/hashicorp/consul/agent/token"
    "github.com/hashicorp/consul/internal/catalog"
    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/mesh"
    proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/internal/resource/demo"
    "github.com/hashicorp/consul/internal/resource/reaper"
    raftstorage "github.com/hashicorp/consul/internal/storage/raft"

@@ -907,6 +908,15 @@ func (s *Server) registerControllers(deps Deps, proxyUpdater ProxyUpdater) {
            return &bundle, nil
        },
        ProxyUpdater: proxyUpdater,
        // This function is adapted from server_connect.go:getCARoots.
        TrustDomainFetcher: func() (string, error) {
            _, caConfig, err := s.fsm.State().CAConfig(nil)
            if err != nil {
                return "", err
            }

            return s.getTrustDomain(caConfig)
        },
    })
}
@@ -19,21 +19,15 @@ func (s *Server) getCARoots(ws memdb.WatchSet, state *state.Store) (*structs.Ind
    if err != nil {
        return nil, err
    }
    if config == nil || config.ClusterID == "" {
        return nil, fmt.Errorf("CA has not finished initializing")

    trustDomain, err := s.getTrustDomain(config)
    if err != nil {
        return nil, err
    }

    indexedRoots := &structs.IndexedCARoots{}

    // Build TrustDomain based on the ClusterID stored.
    signingID := connect.SpiffeIDSigningForCluster(config.ClusterID)
    if signingID == nil {
        // If CA is bootstrapped at all then this should never happen but be
        // defensive.
        return nil, fmt.Errorf("no cluster trust domain setup")
    }

    indexedRoots.TrustDomain = signingID.Host()
    indexedRoots.TrustDomain = trustDomain

    indexedRoots.Index, indexedRoots.Roots = index, roots
    if indexedRoots.Roots == nil {

@@ -77,3 +71,19 @@ func (s *Server) getCARoots(ws memdb.WatchSet, state *state.Store) (*structs.Ind

    return indexedRoots, nil
}

func (s *Server) getTrustDomain(config *structs.CAConfiguration) (string, error) {
    if config == nil || config.ClusterID == "" {
        return "", fmt.Errorf("CA has not finished initializing")
    }

    // Build TrustDomain based on the ClusterID stored.
    signingID := connect.SpiffeIDSigningForCluster(config.ClusterID)
    if signingID == nil {
        // If CA is bootstrapped at all then this should never happen but be
        // defensive.
        return "", fmt.Errorf("no cluster trust domain setup")
    }

    return signingID.Host(), nil
}
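For context (an assumption on my part, not stated in this commit): the trust domain handed to the sidecar-proxy controller is simply the host of the cluster's signing SPIFFE ID, which is expected to look like "<cluster-id>.consul". A minimal sketch of the same derivation getTrustDomain performs:

// Sketch: derive the trust domain from a cluster ID the same way getTrustDomain does.
func trustDomainForCluster(clusterID string) string {
    signingID := connect.SpiffeIDSigningForCluster(clusterID)
    return signingID.Host() // e.g. "11111111-2222-3333-4444-555555555555.consul"
}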
18 go.mod

@@ -105,12 +105,12 @@ require (
    go.opentelemetry.io/otel/sdk/metric v0.39.0
    go.opentelemetry.io/proto/otlp v0.19.0
    go.uber.org/goleak v1.1.10
    golang.org/x/crypto v0.11.0
    golang.org/x/exp v0.0.0-20230321023759-10a507213a29
    golang.org/x/net v0.13.0
    golang.org/x/crypto v0.12.0
    golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
    golang.org/x/net v0.14.0
    golang.org/x/oauth2 v0.6.0
    golang.org/x/sync v0.2.0
    golang.org/x/sys v0.10.0
    golang.org/x/sync v0.3.0
    golang.org/x/sys v0.11.0
    golang.org/x/time v0.3.0
    google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
    google.golang.org/grpc v1.55.0

@@ -251,10 +251,10 @@ require (
    go.opentelemetry.io/otel/trace v1.16.0 // indirect
    go.uber.org/atomic v1.9.0 // indirect
    golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
    golang.org/x/mod v0.10.0 // indirect
    golang.org/x/term v0.10.0 // indirect
    golang.org/x/text v0.11.0 // indirect
    golang.org/x/tools v0.9.1 // indirect
    golang.org/x/mod v0.12.0 // indirect
    golang.org/x/term v0.11.0 // indirect
    golang.org/x/text v0.12.0 // indirect
    golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect
    google.golang.org/api v0.114.0 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
36 go.sum
@ -1011,8 +1011,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
|||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -1023,8 +1023,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
|
||||
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -1052,8 +1052,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -1108,8 +1108,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
|
||||
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1141,8 +1141,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1227,13 +1227,13 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
|
||||
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
|
||||
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1245,8 +1245,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1318,8 +1318,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
|
||||
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
|
||||
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
|
|
@ -165,6 +165,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-2
|
||||
{
|
||||
|
@ -179,6 +180,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-3
|
||||
{
|
||||
|
@ -193,6 +195,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-4
|
||||
{
|
||||
|
@ -207,6 +210,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-5
|
||||
{
|
||||
|
@ -221,6 +225,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-6
|
||||
{
|
||||
|
@ -235,6 +240,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-7
|
||||
{
|
||||
|
@ -249,6 +255,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-8
|
||||
{
|
||||
|
@ -263,6 +270,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-9
|
||||
{
|
||||
|
@ -277,6 +285,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-10
|
||||
{
|
||||
|
@ -291,6 +300,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-11
|
||||
{
|
||||
|
@ -305,6 +315,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-12
|
||||
{
|
||||
|
@ -319,6 +330,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-13
|
||||
{
|
||||
|
@ -333,6 +345,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-14
|
||||
{
|
||||
|
@ -347,6 +360,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-15
|
||||
{
|
||||
|
@ -361,6 +375,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-16
|
||||
{
|
||||
|
@ -375,6 +390,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-17
|
||||
{
|
||||
|
@ -389,6 +405,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-18
|
||||
{
|
||||
|
@ -403,6 +420,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-19
|
||||
{
|
||||
|
@ -417,6 +435,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-20
|
||||
{
|
||||
|
@ -431,6 +450,7 @@ func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.Servi
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -449,6 +469,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-10
|
||||
{
|
||||
|
@ -460,6 +481,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-11
|
||||
{
|
||||
|
@ -471,6 +493,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-12
|
||||
{
|
||||
|
@ -482,6 +505,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-13
|
||||
{
|
||||
|
@ -493,6 +517,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-14
|
||||
{
|
||||
|
@ -504,6 +529,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-15
|
||||
{
|
||||
|
@ -515,6 +541,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-16
|
||||
{
|
||||
|
@ -526,6 +553,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-17
|
||||
{
|
||||
|
@ -537,6 +565,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-18
|
||||
{
|
||||
|
@ -548,6 +577,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-19
|
||||
{
|
||||
|
@ -559,6 +589,7 @@ func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -579,6 +610,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-2
|
||||
{
|
||||
|
@ -592,6 +624,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-3
|
||||
{
|
||||
|
@ -605,6 +638,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-4
|
||||
{
|
||||
|
@ -618,6 +652,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-5
|
||||
{
|
||||
|
@ -631,6 +666,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-6
|
||||
{
|
||||
|
@ -644,6 +680,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-7
|
||||
{
|
||||
|
@ -657,6 +694,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-8
|
||||
{
|
||||
|
@ -670,6 +708,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-9
|
||||
{
|
||||
|
@ -683,6 +722,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
// api-20
|
||||
{
|
||||
|
@ -696,6 +736,7 @@ func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.S
|
|||
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
|
||||
Identity: "api",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -482,6 +482,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb
|
|||
"http": {Port: 443, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
},
|
||||
}).
|
||||
|
@ -530,6 +531,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
{
|
||||
TargetRef: api3.Id,
|
||||
|
@ -540,6 +542,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb
|
|||
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
@ -569,6 +572,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb
|
|||
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
{
|
||||
TargetRef: api2.Id,
|
||||
|
@ -580,6 +584,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb
|
|||
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
@ -614,6 +619,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb
|
|||
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
@ -645,6 +651,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb
|
|||
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
|
||||
Identity: "api",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
@ -664,6 +671,7 @@ func RunCatalogV1Alpha1EndpointsLifecycleIntegrationTest(t *testing.T, client pb
|
|||
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
|
||||
},
|
||||
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
|
||||
Identity: "api",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
|
|
@@ -117,7 +117,7 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle
    // cause this service to be rereconciled.
    r.workloadMap.TrackIDForSelector(req.ID, serviceData.service.GetWorkloads())

    // Now read and umarshal all workloads selected by the service. It is imperative
    // Now read and unmarshal all workloads selected by the service. It is imperative
    // that this happens after we notify the selection tracker to be tracking that
    // selection criteria. If the order were reversed we could potentially miss
    // workload creations that should be selected if they happen after gathering

@@ -380,5 +380,6 @@ func workloadToEndpoint(svc *pbcatalog.Service, data *workloadData) *pbcatalog.E
        HealthStatus: health,
        Addresses: workloadAddrs,
        Ports: endpointPorts,
        Identity: data.workload.Identity,
    }
}
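The comment in the hunk above is the key ordering constraint in this reconciler. The sketch below restates it with hypothetical type and helper names; only the track-then-read ordering is taken from the source.

// workloadTracker is a stand-in for the selection tracker used above; the name is hypothetical.
type workloadTracker interface {
    TrackIDForSelector(id *pbresource.ID, sel *pbcatalog.WorkloadSelector)
}

// trackThenRead registers interest in the selector before reading the workloads it
// currently matches. If the order were reversed, a workload created between the read
// and the track would be missed entirely: absent from the snapshot and not mapped
// back to this service for a future reconcile.
func trackThenRead(t workloadTracker, id *pbresource.ID, sel *pbcatalog.WorkloadSelector, read func(*pbcatalog.WorkloadSelector) []*pbcatalog.Workload) []*pbcatalog.Workload {
    t.TrackIDForSelector(id, sel) // 1. track first
    return read(sel)              // 2. then snapshot the currently selected workloads
}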
@@ -123,6 +123,7 @@ func TestWorkloadToEndpoint(t *testing.T) {
            // the protocol is wrong here so it will not show up in the endpoints.
            "grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP2},
        },
        Identity: "test-identity",
    }

    data := &workloadData{

@@ -146,6 +147,7 @@ func TestWorkloadToEndpoint(t *testing.T) {
        // that we can properly determine the health status and the overall
        // controller tests will prove that the integration works as expected.
        HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
        Identity: workload.Identity,
    }

    prototest.AssertDeepEqual(t, expected, workloadToEndpoint(service, data))

@@ -611,6 +613,7 @@ func (suite *controllerSuite) TestController() {
        Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
        Ports: map[string]*pbcatalog.WorkloadPort{
            "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
            "grpc": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
        },
        Identity: "api",
    }).

@@ -629,6 +632,7 @@ func (suite *controllerSuite) TestController() {
            "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
        },
        HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
        Identity: "api",
    })

    // Update the health status of the workload

@@ -660,6 +664,7 @@ func (suite *controllerSuite) TestController() {
            "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
        },
        HealthStatus: pbcatalog.Health_HEALTH_PASSING,
        Identity: "api",
    })

    // rewrite the service to add more selection criteria. This should trigger

@@ -683,6 +688,23 @@ func (suite *controllerSuite) TestController() {
    // Verify that the endpoints were not regenerated
    suite.client.RequireVersionUnchanged(suite.T(), endpointsID, endpoints.Version)

    // Update the service.
    updatedService := rtest.Resource(types.ServiceType, "api").
        WithData(suite.T(), &pbcatalog.Service{
            Workloads: &pbcatalog.WorkloadSelector{
                Prefixes: []string{"api-"},
            },
            Ports: []*pbcatalog.ServicePort{
                {TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
                {TargetPort: "grpc", Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
            },
        }).
        Write(suite.T(), suite.client)

    // Wait for the endpoints to be regenerated
    endpoints = suite.client.WaitForNewVersion(suite.T(), endpointsID, endpoints.Version)
    rtest.RequireOwner(suite.T(), endpoints, updatedService.Id, false)

    // Delete the endpoints. The controller should bring these back momentarily
    suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: endpointsID})
@@ -6,6 +6,8 @@ package mesh
import (
    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/internal/resource"
)

@@ -28,6 +30,7 @@ var (
    TCPRouteKind = types.TCPRouteKind
    DestinationPolicyKind = types.DestinationPolicyKind
    ComputedRoutesKind = types.ComputedRoutesKind
    ProxyStateTemplateKind = types.ProxyStateTemplateKind

    // Resource Types for the v1alpha1 version.

@@ -40,21 +43,34 @@ var (
    TCPRouteV1Alpha1Type = types.TCPRouteV1Alpha1Type
    DestinationPolicyV1Alpha1Type = types.DestinationPolicyV1Alpha1Type
    ComputedRoutesV1Alpha1Type = types.ComputedRoutesV1Alpha1Type
    ProxyStateTemplateV1AlphaType = types.ProxyStateTemplateV1Alpha1Type

    // Resource Types for the latest version.

    ProxyConfigurationType = types.ProxyConfigurationType
    UpstreamsType = types.UpstreamsType
    UpstreamsConfigurationType = types.UpstreamsConfigurationType
    ProxyStateTemplateConfigurationType = types.ProxyStateTemplateType
    HTTPRouteType = types.HTTPRouteType
    GRPCRouteType = types.GRPCRouteType
    TCPRouteType = types.TCPRouteType
    DestinationPolicyType = types.DestinationPolicyType
    ComputedRoutesType = types.ComputedRoutesType
    ProxyConfigurationType = types.ProxyConfigurationType
    UpstreamsType = types.UpstreamsType
    UpstreamsConfigurationType = types.UpstreamsConfigurationType
    ProxyStateTemplateType = types.ProxyStateTemplateType
    HTTPRouteType = types.HTTPRouteType
    GRPCRouteType = types.GRPCRouteType
    TCPRouteType = types.TCPRouteType
    DestinationPolicyType = types.DestinationPolicyType
    ComputedRoutesType = types.ComputedRoutesType

    // Controller statuses.

    // Sidecar-proxy controller.
    SidecarProxyStatusKey = sidecarproxy.ControllerName
    SidecarProxyStatusConditionMeshDestination = status.StatusConditionDestinationAccepted
    SidecarProxyStatusReasonNonMeshDestination = status.StatusReasonMeshProtocolNotFound
    SidecarProxyStatusReasonMeshDestination = status.StatusReasonMeshProtocolFound
    SidecarProxyStatusReasonDestinationServiceNotFound = status.StatusReasonDestinationServiceNotFound
    SidecarProxyStatusReasonDestinationServiceFound = status.StatusReasonDestinationServiceFound
    SidecarProxyStatusReasonMeshProtocolDestinationPort = status.StatusReasonMeshProtocolDestinationPort
    SidecarProxyStatusReasonNonMeshProtocolDestinationPort = status.StatusReasonNonMeshProtocolDestinationPort
)

// RegisterTypes adds all resource types within the "catalog" API group
// RegisterTypes adds all resource types within the "mesh" API group
// to the given type registry
func RegisterTypes(r resource.Registry) {
    types.Register(r)

@@ -66,4 +82,6 @@ func RegisterControllers(mgr *controller.Manager, deps ControllerDependencies) {
    controllers.Register(mgr, deps)
}

type TrustDomainFetcher = sidecarproxy.TrustDomainFetcher

type ControllerDependencies = controllers.Dependencies
@@ -0,0 +1,185 @@
package sidecarproxycache

import (
    "sync"

    "github.com/hashicorp/consul/internal/catalog"
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

// Cache stores information needed for the sidecar-proxy controller to reconcile efficiently.
// This currently means storing a list of all destinations for easy look up
// as well as indices of source proxies where those destinations are referenced.
//
// It is the responsibility of the controller and its subcomponents (like mapper and data fetcher)
// to keep this cache up-to-date as we're observing new data.
type Cache struct {
    lock sync.RWMutex

    // store is a map from destination service reference and port as a reference key
    // to the object representing destination reference.
    store map[ReferenceKeyWithPort]intermediate.CombinedDestinationRef

    // sourceProxiesIndex stores a map from a reference key of source proxy IDs
    // to the keys in the store map.
    sourceProxiesIndex map[resource.ReferenceKey]storeKeys
}

type storeKeys map[ReferenceKeyWithPort]struct{}

func New() *Cache {
    return &Cache{
        store: make(map[ReferenceKeyWithPort]intermediate.CombinedDestinationRef),
        sourceProxiesIndex: make(map[resource.ReferenceKey]storeKeys),
    }
}

type ReferenceKeyWithPort struct {
    resource.ReferenceKey
    port string
}

func KeyFromRefAndPort(ref *pbresource.Reference, port string) ReferenceKeyWithPort {
    refKey := resource.NewReferenceKey(ref)
    return ReferenceKeyWithPort{refKey, port}
}

// WriteDestination adds destination reference to the cache.
func (c *Cache) WriteDestination(d intermediate.CombinedDestinationRef) {
    // Check that reference is a catalog.Service type.
    if !resource.EqualType(catalog.ServiceType, d.ServiceRef.Type) {
        panic("ref must be of type catalog.Service")
    }

    // Also, check that explicit destination reference is a mesh.Upstreams type.
    if d.ExplicitDestinationsID != nil &&
        !resource.EqualType(types.UpstreamsType, d.ExplicitDestinationsID.Type) {
        panic("ExplicitDestinationsID must be of type mesh.Upstreams")
    }

    c.lock.Lock()
    defer c.lock.Unlock()

    c.deleteLocked(d.ServiceRef, d.Port)
    c.addLocked(d)
}

// DeleteDestination deletes a given destination reference and port from cache.
func (c *Cache) DeleteDestination(ref *pbresource.Reference, port string) {
    // Check that reference is a catalog.Service type.
    if !resource.EqualType(catalog.ServiceType, ref.Type) {
        panic("ref must be of type catalog.Service")
    }

    c.lock.Lock()
    defer c.lock.Unlock()

    c.deleteLocked(ref, port)
}

func (c *Cache) addLocked(d intermediate.CombinedDestinationRef) {
    key := KeyFromRefAndPort(d.ServiceRef, d.Port)

    c.store[key] = d

    // Update source proxies index.
    for proxyRef := range d.SourceProxies {
        _, ok := c.sourceProxiesIndex[proxyRef]
        if !ok {
            c.sourceProxiesIndex[proxyRef] = make(storeKeys)
        }

        c.sourceProxiesIndex[proxyRef][key] = struct{}{}
    }
}

func (c *Cache) deleteLocked(ref *pbresource.Reference, port string) {
    key := KeyFromRefAndPort(ref, port)

    // First get it from the store.
    dest, ok := c.store[key]
    if !ok {
        // If it's not there, return as there's nothing for us to do.
        return
    }

    // Update source proxies indices.
    for proxyRef := range dest.SourceProxies {
        // Delete our destination key from this source proxy.
        delete(c.sourceProxiesIndex[proxyRef], key)
    }

    // Finally, delete this destination from the store.
    delete(c.store, key)
}

// DeleteSourceProxy deletes the source proxy given by id from the cache.
func (c *Cache) DeleteSourceProxy(id *pbresource.ID) {
    // Check that id is the ProxyStateTemplate type.
    if !resource.EqualType(types.ProxyStateTemplateType, id.Type) {
        panic("id must be of type mesh.ProxyStateTemplate")
    }

    c.lock.Lock()
    defer c.lock.Unlock()

    proxyIDKey := resource.NewReferenceKey(id)

    // Get all destination keys.
    destKeys := c.sourceProxiesIndex[proxyIDKey]

    for destKey := range destKeys {
        // Read destination.
        dest, ok := c.store[destKey]
        if !ok {
            // If there's no destination with that key, skip it as there's nothing for us to do.
            continue
        }

        // Delete the source proxy ID.
        delete(dest.SourceProxies, proxyIDKey)
    }

    // Finally, delete the index for this proxy.
    delete(c.sourceProxiesIndex, proxyIDKey)
}

// ReadDestination returns a destination reference for the given service reference and port.
func (c *Cache) ReadDestination(ref *pbresource.Reference, port string) (intermediate.CombinedDestinationRef, bool) {
    // Check that reference is a catalog.Service type.
    if !resource.EqualType(catalog.ServiceType, ref.Type) {
        panic("ref must be of type catalog.Service")
    }

    c.lock.RLock()
    defer c.lock.RUnlock()

    key := KeyFromRefAndPort(ref, port)

    d, found := c.store[key]
    return d, found
}

// DestinationsBySourceProxy returns all destinations that are referenced by the given source proxy id.
func (c *Cache) DestinationsBySourceProxy(id *pbresource.ID) []intermediate.CombinedDestinationRef {
    // Check that id is the ProxyStateTemplate type.
    if !resource.EqualType(types.ProxyStateTemplateType, id.Type) {
        panic("id must be of type mesh.ProxyStateTemplate")
    }

    c.lock.RLock()
    defer c.lock.RUnlock()

    var destinations []intermediate.CombinedDestinationRef

    proxyIDKey := resource.NewReferenceKey(id)

    for destKey := range c.sourceProxiesIndex[proxyIDKey] {
        destinations = append(destinations, c.store[destKey])
    }

    return destinations
}
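A brief usage sketch of the cache from a consumer's point of view (the resource names are made up for illustration; the helper calls mirror the tests below):

func exampleCacheUsage() {
    c := sidecarproxycache.New()

    proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "web-workload-1").ID()

    // Record that the "backend" service's "tcp" port is an explicit destination
    // referenced by the "web-workload-1" sidecar's proxy state template.
    c.WriteDestination(intermediate.CombinedDestinationRef{
        ServiceRef:             resourcetest.Resource(catalog.ServiceType, "backend").ReferenceNoSection(),
        Port:                   "tcp",
        ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "backend-upstreams").ID(),
        SourceProxies: map[resource.ReferenceKey]struct{}{
            resource.NewReferenceKey(proxyID): {},
        },
    })

    // When "web-workload-1" needs to be reconciled, look up everything it references.
    _ = c.DestinationsBySourceProxy(proxyID)
}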
@@ -0,0 +1,188 @@
package sidecarproxycache

import (
    "testing"

    "github.com/hashicorp/consul/internal/catalog"
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/internal/resource/resourcetest"
    "github.com/hashicorp/consul/proto-public/pbresource"
    "github.com/stretchr/testify/require"
)

func TestWrite_Create(t *testing.T) {
    cache := New()

    proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID()
    destination := testDestination(proxyID)
    cache.WriteDestination(destination)

    destKey := KeyFromRefAndPort(destination.ServiceRef, destination.Port)
    require.Equal(t, destination, cache.store[destKey])
    actualSourceProxies := cache.sourceProxiesIndex
    expectedSourceProxies := map[resource.ReferenceKey]storeKeys{
        resource.NewReferenceKey(proxyID): {destKey: struct{}{}},
    }
    require.Equal(t, expectedSourceProxies, actualSourceProxies)

    // Check that we can read back the destination successfully.
    actualDestination, found := cache.ReadDestination(destination.ServiceRef, destination.Port)
    require.True(t, found)
    require.Equal(t, destination, actualDestination)
}

func TestWrite_Update(t *testing.T) {
    cache := New()

    proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID()
    destination1 := testDestination(proxyID)
    cache.WriteDestination(destination1)

    // Add another destination for the same proxy ID.
    destination2 := testDestination(proxyID)
    destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection()
    cache.WriteDestination(destination2)

    // Check that the source proxies are updated.
    actualSourceProxies := cache.sourceProxiesIndex
    expectedSourceProxies := map[resource.ReferenceKey]storeKeys{
        resource.NewReferenceKey(proxyID): {
            KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{},
            KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
        },
    }
    require.Equal(t, expectedSourceProxies, actualSourceProxies)

    // Add another destination for a different proxy.
    anotherProxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-def").ID()
    destination3 := testDestination(anotherProxyID)
    destination3.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-3").ReferenceNoSection()
    cache.WriteDestination(destination3)

    actualSourceProxies = cache.sourceProxiesIndex
    expectedSourceProxies = map[resource.ReferenceKey]storeKeys{
        resource.NewReferenceKey(proxyID): {
            KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{},
            KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
        },
        resource.NewReferenceKey(anotherProxyID): {
            KeyFromRefAndPort(destination3.ServiceRef, destination3.Port): struct{}{},
        },
    }
    require.Equal(t, expectedSourceProxies, actualSourceProxies)

    // Overwrite the proxy id completely.
    destination1.SourceProxies = map[resource.ReferenceKey]struct{}{resource.NewReferenceKey(anotherProxyID): {}}
    cache.WriteDestination(destination1)

    actualSourceProxies = cache.sourceProxiesIndex
    expectedSourceProxies = map[resource.ReferenceKey]storeKeys{
        resource.NewReferenceKey(proxyID): {
            KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
        },
        resource.NewReferenceKey(anotherProxyID): {
            KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{},
            KeyFromRefAndPort(destination3.ServiceRef, destination3.Port): struct{}{},
        },
    }
    require.Equal(t, expectedSourceProxies, actualSourceProxies)
}

func TestWrite_Delete(t *testing.T) {
    cache := New()

    proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID()
    destination1 := testDestination(proxyID)
    cache.WriteDestination(destination1)

    // Add another destination for the same proxy ID.
    destination2 := testDestination(proxyID)
    destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection()
    cache.WriteDestination(destination2)

    cache.DeleteDestination(destination1.ServiceRef, destination1.Port)

    require.NotContains(t, cache.store, KeyFromRefAndPort(destination1.ServiceRef, destination1.Port))

    // Check that the source proxies are updated.
    actualSourceProxies := cache.sourceProxiesIndex
    expectedSourceProxies := map[resource.ReferenceKey]storeKeys{
        resource.NewReferenceKey(proxyID): {
            KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
        },
    }
    require.Equal(t, expectedSourceProxies, actualSourceProxies)

    // Try to delete non-existing destination and check that nothing has changed.
    cache.DeleteDestination(
        resourcetest.Resource(catalog.ServiceType, "does-not-exist").ReferenceNoSection(),
        "doesn't-matter")

    require.Contains(t, cache.store, KeyFromRefAndPort(destination2.ServiceRef, destination2.Port))
    require.Equal(t, expectedSourceProxies, cache.sourceProxiesIndex)
}

func TestDeleteSourceProxy(t *testing.T) {
    cache := New()

    proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID()
    destination1 := testDestination(proxyID)
    cache.WriteDestination(destination1)

    // Add another destination for the same proxy ID.
    destination2 := testDestination(proxyID)
    destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection()
    cache.WriteDestination(destination2)

    cache.DeleteSourceProxy(proxyID)

    // Check that source proxy index is gone.
    proxyKey := resource.NewReferenceKey(proxyID)
    require.NotContains(t, cache.sourceProxiesIndex, proxyKey)

    // Check that the destinations no longer have this proxy as the source.
    require.NotContains(t, destination1.SourceProxies, proxyKey)
    require.NotContains(t, destination2.SourceProxies, proxyKey)

    // Try to add a non-existent key to source proxy index
    cache.sourceProxiesIndex[proxyKey] = map[ReferenceKeyWithPort]struct{}{
        {port: "doesn't-matter"}: {}}
    cache.DeleteSourceProxy(proxyID)

    // Check that source proxy index is gone.
    require.NotContains(t, cache.sourceProxiesIndex, proxyKey)

    // Check that the destinations no longer have this proxy as the source.
    require.NotContains(t, destination1.SourceProxies, proxyKey)
    require.NotContains(t, destination2.SourceProxies, proxyKey)
}

func TestDestinationsBySourceProxy(t *testing.T) {
    cache := New()

    proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID()
    destination1 := testDestination(proxyID)
    cache.WriteDestination(destination1)

    // Add another destination for the same proxy ID.
    destination2 := testDestination(proxyID)
    destination2.ServiceRef = resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection()
    cache.WriteDestination(destination2)

    actualDestinations := cache.DestinationsBySourceProxy(proxyID)
    expectedDestinations := []intermediate.CombinedDestinationRef{destination1, destination2}
    require.ElementsMatch(t, expectedDestinations, actualDestinations)
}

func testDestination(proxyID *pbresource.ID) intermediate.CombinedDestinationRef {
    return intermediate.CombinedDestinationRef{
        ServiceRef: resourcetest.Resource(catalog.ServiceType, "test-service").ReferenceNoSection(),
        Port: "tcp",
        ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "test-servicedestinations").ID(),
        SourceProxies: map[resource.ReferenceKey]struct{}{
            resource.NewReferenceKey(proxyID): {},
        },
    }
}
@@ -6,17 +6,24 @@ package controllers
import (
    "github.com/hashicorp/consul/internal/catalog"
    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/xds"
    "github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/internal/resource/mappers/bimapper"
)

type Dependencies struct {
    TrustDomainFetcher sidecarproxy.TrustDomainFetcher
    TrustBundleFetcher xds.TrustBundleFetcher
    ProxyUpdater xds.ProxyUpdater
}

func Register(mgr *controller.Manager, deps Dependencies) {
    c := sidecarproxycache.New()
    m := sidecarproxymapper.New(c)
    mapper := bimapper.New(types.ProxyStateTemplateType, catalog.ServiceEndpointsType)
    mgr.Register(xds.Controller(mapper, deps.ProxyUpdater, deps.TrustBundleFetcher))
    mgr.Register(sidecarproxy.Controller(c, m, deps.TrustDomainFetcher))
}
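A sketch of how these dependencies are expected to be supplied by a caller (the fetcher values below are placeholders; the real ones are the closures added to server.go earlier in this commit):

func registerMeshControllersSketch(mgr *controller.Manager, proxyUpdater xds.ProxyUpdater) {
    controllers.Register(mgr, controllers.Dependencies{
        TrustDomainFetcher: func() (string, error) { return "example.consul", nil }, // placeholder
        TrustBundleFetcher: nil, // placeholder; supplied by the server in practice
        ProxyUpdater:       proxyUpdater,
    })
}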
@@ -0,0 +1,53 @@
package builder

import (
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1/pbproxystate"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// Builder builds a ProxyStateTemplate.
type Builder struct {
	id                 *pbresource.ID
	proxyStateTemplate *pbmesh.ProxyStateTemplate
	trustDomain        string
}

func New(id *pbresource.ID, identity *pbresource.Reference, trustDomain string) *Builder {
	return &Builder{
		id:          id,
		trustDomain: trustDomain,
		proxyStateTemplate: &pbmesh.ProxyStateTemplate{
			ProxyState: &pbmesh.ProxyState{
				Identity:  identity,
				Clusters:  make(map[string]*pbproxystate.Cluster),
				Endpoints: make(map[string]*pbproxystate.Endpoints),
			},
			RequiredEndpoints:        make(map[string]*pbproxystate.EndpointRef),
			RequiredLeafCertificates: make(map[string]*pbproxystate.LeafCertificateRef),
			RequiredTrustBundles:     make(map[string]*pbproxystate.TrustBundleRef),
		},
	}
}

func (b *Builder) Build() *pbmesh.ProxyStateTemplate {
	return b.proxyStateTemplate
}

type ListenerBuilder struct {
	listener *pbproxystate.Listener
	builder  *Builder
}

func (b *Builder) NewListenerBuilder(l *pbproxystate.Listener) *ListenerBuilder {
	return &ListenerBuilder{
		listener: l,
		builder:  b,
	}
}

func (l *ListenerBuilder) buildListener() *Builder {
	l.builder.proxyStateTemplate.ProxyState.Listeners = append(l.builder.proxyStateTemplate.ProxyState.Listeners, l.listener)

	return l.builder
}
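For orientation, this is how the rest of this commit drives the fluent builder (the sidecar-proxy controller and the builder tests below chain it the same way); proxyID, identityRef, workload, and destinations stand in for values the caller fetches:

	tmpl := New(proxyID, identityRef, "foo.consul"). // trust domain as used in the builder tests
		BuildLocalApp(workload).                 // inbound listener plus local_app cluster/endpoints
		BuildDestinations(destinations).         // one outbound listener and cluster per explicit upstream
		Build()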
@@ -0,0 +1,26 @@
package builder

import (
	"flag"
	"os"
	"testing"

	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
)

func TestMain(m *testing.M) {
	flag.Parse()
	os.Exit(m.Run())
}

func protoToJSON(t *testing.T, pb proto.Message) string {
	t.Helper()
	m := protojson.MarshalOptions{
		Indent: "  ",
	}
	gotJSON, err := m.Marshal(pb)
	require.NoError(t, err)
	return string(gotJSON)
}
@@ -0,0 +1,148 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package builder

import (
	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1/pbproxystate"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

func (b *Builder) BuildDestinations(destinations []*intermediate.Destination) *Builder {
	for _, destination := range destinations {
		if destination.Explicit != nil {
			b.buildExplicitDestination(destination)
		}
	}

	return b
}

func (b *Builder) buildExplicitDestination(destination *intermediate.Destination) {
	clusterName := DestinationClusterName(destination.Explicit.DestinationRef, destination.Explicit.Datacenter, b.trustDomain)
	statPrefix := DestinationStatPrefix(destination.Explicit.DestinationRef, destination.Explicit.Datacenter)

	// All endpoints should have the same protocol as the endpoints controller ensures that is the case,
	// so it's sufficient to read just the first endpoint.
	if len(destination.ServiceEndpoints.Endpoints.Endpoints) > 0 {
		// Get destination port so that we can configure this destination correctly based on its protocol.
		destPort := destination.ServiceEndpoints.Endpoints.Endpoints[0].Ports[destination.Explicit.DestinationPort]

		// Find the destination proxy's port.
		// Endpoints refs will need to route to mesh port instead of the destination port as that
		// is the port of the destination's proxy.
		meshPortName := findMeshPort(destination.ServiceEndpoints.Endpoints.Endpoints[0].Ports)

		if destPort != nil {
			b.addOutboundDestinationListener(destination.Explicit).
				addRouter(clusterName, statPrefix, destPort.Protocol).
				buildListener().
				addCluster(clusterName, destination.Identities).
				addEndpointsRef(clusterName, destination.ServiceEndpoints.Resource.Id, meshPortName)
		}
	}
}

func (b *Builder) addOutboundDestinationListener(explicit *pbmesh.Upstream) *ListenerBuilder {
	listener := &pbproxystate.Listener{
		Direction: pbproxystate.Direction_DIRECTION_OUTBOUND,
	}

	// Create outbound listener address.
	switch explicit.ListenAddr.(type) {
	case *pbmesh.Upstream_IpPort:
		destinationAddr := explicit.ListenAddr.(*pbmesh.Upstream_IpPort)
		listener.BindAddress = &pbproxystate.Listener_HostPort{
			HostPort: &pbproxystate.HostPortAddress{
				Host: destinationAddr.IpPort.Ip,
				Port: destinationAddr.IpPort.Port,
			},
		}
		listener.Name = DestinationListenerName(explicit.DestinationRef.Name, explicit.DestinationPort, destinationAddr.IpPort.Ip, destinationAddr.IpPort.Port)
	case *pbmesh.Upstream_Unix:
		destinationAddr := explicit.ListenAddr.(*pbmesh.Upstream_Unix)
		listener.BindAddress = &pbproxystate.Listener_UnixSocket{
			UnixSocket: &pbproxystate.UnixSocketAddress{
				Path: destinationAddr.Unix.Path,
				Mode: destinationAddr.Unix.Mode,
			},
		}
		listener.Name = DestinationListenerName(explicit.DestinationRef.Name, explicit.DestinationPort, destinationAddr.Unix.Path, 0)
	}

	return b.NewListenerBuilder(listener)
}

func (l *ListenerBuilder) addRouter(clusterName, statPrefix string, protocol pbcatalog.Protocol) *ListenerBuilder {
	// For explicit destinations, we have no filter chain match, and filters are based on port protocol.
	switch protocol {
	case pbcatalog.Protocol_PROTOCOL_TCP:
		router := &pbproxystate.Router{
			Destination: &pbproxystate.Router_L4{
				L4: &pbproxystate.L4Destination{
					Name:       clusterName,
					StatPrefix: statPrefix,
				},
			},
		}
		l.listener.Routers = append(l.listener.Routers, router)
	}
	return l
}

func (b *Builder) addCluster(clusterName string, destinationIdentities []*pbresource.Reference) *Builder {
	var spiffeIDs []string
	for _, identity := range destinationIdentities {
		spiffeIDs = append(spiffeIDs, connect.SpiffeIDFromIdentityRef(b.trustDomain, identity))
	}

	// Create destination cluster.
	cluster := &pbproxystate.Cluster{
		Group: &pbproxystate.Cluster_EndpointGroup{
			EndpointGroup: &pbproxystate.EndpointGroup{
				Group: &pbproxystate.EndpointGroup_Dynamic{
					Dynamic: &pbproxystate.DynamicEndpointGroup{
						Config: &pbproxystate.DynamicEndpointGroupConfig{
							DisablePanicThreshold: true,
						},
						OutboundTls: &pbproxystate.TransportSocket{
							ConnectionTls: &pbproxystate.TransportSocket_OutboundMesh{
								OutboundMesh: &pbproxystate.OutboundMeshMTLS{
									IdentityKey: b.proxyStateTemplate.ProxyState.Identity.Name,
									ValidationContext: &pbproxystate.MeshOutboundValidationContext{
										SpiffeIds: spiffeIDs,
									},
									Sni: clusterName,
								},
							},
						},
					},
				},
			},
		},
	}

	b.proxyStateTemplate.ProxyState.Clusters[clusterName] = cluster
	return b
}

func (b *Builder) addEndpointsRef(clusterName string, serviceEndpointsID *pbresource.ID, destinationPort string) *Builder {
	b.proxyStateTemplate.RequiredEndpoints[clusterName] = &pbproxystate.EndpointRef{
		Id:   serviceEndpointsID,
		Port: destinationPort,
	}
	return b
}

func findMeshPort(ports map[string]*pbcatalog.WorkloadPort) string {
	for name, port := range ports {
		if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
			return name
		}
	}
	return ""
}
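To make the endpoints-ref wiring above concrete, a small sketch using the fixture ports from the test below: the required endpoints ref for an explicit "tcp" destination is keyed by the cluster name but points at the destination's mesh port, which findMeshPort picks out.

	ports := map[string]*pbcatalog.WorkloadPort{
		"tcp":  {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
		"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
	}
	meshPortName := findMeshPort(ports) // "mesh", the port the endpoints ref routes to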
@@ -0,0 +1,108 @@
package builder

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/internal/catalog"
	"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
	"github.com/hashicorp/consul/internal/resource"
	"github.com/hashicorp/consul/internal/resource/resourcetest"
	"github.com/hashicorp/consul/internal/testing/golden"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

var (
	endpointsData = &pbcatalog.ServiceEndpoints{
		Endpoints: []*pbcatalog.Endpoint{
			{
				Addresses: []*pbcatalog.WorkloadAddress{
					{Host: "10.0.0.1"},
				},
				Ports: map[string]*pbcatalog.WorkloadPort{
					"tcp":  {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
					"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
					"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
				},
			},
		},
	}
)

func TestBuildExplicitDestinations(t *testing.T) {
	api1Endpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "api-1").
		WithData(t, endpointsData).WithTenancy(resource.DefaultNamespacedTenancy()).Build()

	api2Endpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "api-2").
		WithData(t, endpointsData).WithTenancy(resource.DefaultNamespacedTenancy()).Build()

	api1Identity := &pbresource.Reference{
		Name:    "api1-identity",
		Tenancy: api1Endpoints.Id.Tenancy,
	}

	api2Identity := &pbresource.Reference{
		Name:    "api2-identity",
		Tenancy: api2Endpoints.Id.Tenancy,
	}

	destinationIpPort := &intermediate.Destination{
		Explicit: &pbmesh.Upstream{
			DestinationRef:  resource.Reference(api1Endpoints.Id, ""),
			DestinationPort: "tcp",
			Datacenter:      "dc1",
			ListenAddr: &pbmesh.Upstream_IpPort{
				IpPort: &pbmesh.IPPortAddress{Ip: "1.1.1.1", Port: 1234},
			},
		},
		ServiceEndpoints: &intermediate.ServiceEndpoints{
			Resource:  api1Endpoints,
			Endpoints: endpointsData,
		},
		Identities: []*pbresource.Reference{api1Identity},
	}

	destinationUnix := &intermediate.Destination{
		Explicit: &pbmesh.Upstream{
			DestinationRef:  resource.Reference(api2Endpoints.Id, ""),
			DestinationPort: "tcp",
			Datacenter:      "dc1",
			ListenAddr: &pbmesh.Upstream_Unix{
				Unix: &pbmesh.UnixSocketAddress{Path: "/path/to/socket", Mode: "0666"},
			},
		},
		ServiceEndpoints: &intermediate.ServiceEndpoints{
			Resource:  api2Endpoints,
			Endpoints: endpointsData,
		},
		Identities: []*pbresource.Reference{api2Identity},
	}

	cases := map[string]struct {
		destinations []*intermediate.Destination
	}{
		"l4-single-destination-ip-port-bind-address": {
			destinations: []*intermediate.Destination{destinationIpPort},
		},
		"l4-single-destination-unix-socket-bind-address": {
			destinations: []*intermediate.Destination{destinationUnix},
		},
		"l4-multi-destination": {
			destinations: []*intermediate.Destination{destinationIpPort, destinationUnix},
		},
	}

	for name, c := range cases {
		proxyTmpl := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul").
			BuildDestinations(c.destinations).
			Build()

		actual := protoToJSON(t, proxyTmpl)
		expected := golden.Get(t, actual, name+".golden")

		require.JSONEq(t, expected, actual)
	}
}
@@ -0,0 +1,162 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package builder

import (
	"fmt"

	"github.com/hashicorp/consul/envoyextensions/xdscommon"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1/pbproxystate"
)

func (b *Builder) BuildLocalApp(workload *pbcatalog.Workload) *Builder {
	// Go through workload ports and add the first non-mesh port we see.
	// Note that the order of ports is non-deterministic here but the xds generation
	// code should make sure to send it in the same order to Envoy to avoid unnecessary
	// updates.
	// todo (ishustava): Note we will need to support multiple ports in the future.
	for portName, port := range workload.Ports {
		clusterName := fmt.Sprintf("%s:%s", xdscommon.LocalAppClusterName, portName)

		if port.Protocol != pbcatalog.Protocol_PROTOCOL_MESH {
			b.addInboundListener(xdscommon.PublicListenerName, workload).
				addInboundRouter(clusterName, port).
				addInboundTLS().
				buildListener().
				addLocalAppCluster(clusterName).
				addLocalAppStaticEndpoints(clusterName, port)
			break
		}
	}

	return b
}

func (b *Builder) addInboundListener(name string, workload *pbcatalog.Workload) *ListenerBuilder {
	listener := &pbproxystate.Listener{
		Name:      name,
		Direction: pbproxystate.Direction_DIRECTION_INBOUND,
	}

	// We will take the listener bind port from the workload.
	// Find the mesh port.
	meshPort, ok := workload.GetMeshPortName()
	if !ok {
		// At this point, we should only get workloads that have mesh ports.
		return &ListenerBuilder{
			builder: b,
		}
	}

	// Check if the workload has a specific address for the mesh port.
	meshAddresses := workload.GetNonExternalAddressesForPort(meshPort)

	// If there are no mesh addresses, return. This should be impossible.
	if len(meshAddresses) == 0 {
		return &ListenerBuilder{
			builder: b,
		}
	}

	// If there is more than one mesh address, use the first one in the list.
	var meshAddress string
	if len(meshAddresses) > 0 {
		meshAddress = meshAddresses[0].Host
	}

	listener.BindAddress = &pbproxystate.Listener_HostPort{
		HostPort: &pbproxystate.HostPortAddress{
			Host: meshAddress,
			Port: workload.Ports[meshPort].Port,
		},
	}

	return b.NewListenerBuilder(listener)
}

func (l *ListenerBuilder) addInboundRouter(clusterName string, port *pbcatalog.WorkloadPort) *ListenerBuilder {
	if l.listener == nil {
		return l
	}

	if port.Protocol == pbcatalog.Protocol_PROTOCOL_TCP {
		r := &pbproxystate.Router{
			Destination: &pbproxystate.Router_L4{
				L4: &pbproxystate.L4Destination{
					Name:       clusterName,
					StatPrefix: l.listener.Name,
				},
			},
		}
		l.listener.Routers = append(l.listener.Routers, r)
	}
	return l
}

func (b *Builder) addLocalAppCluster(clusterName string) *Builder {
	// Make a cluster for this router destination.
	b.proxyStateTemplate.ProxyState.Clusters[clusterName] = &pbproxystate.Cluster{
		Group: &pbproxystate.Cluster_EndpointGroup{
			EndpointGroup: &pbproxystate.EndpointGroup{
				Group: &pbproxystate.EndpointGroup_Static{
					Static: &pbproxystate.StaticEndpointGroup{},
				},
			},
		},
	}
	return b
}

func (b *Builder) addLocalAppStaticEndpoints(clusterName string, port *pbcatalog.WorkloadPort) *Builder {
	// We're adding endpoints statically as opposed to creating an endpoint ref
	// because this endpoint is less likely to change as we're not tracking the health.
	endpoint := &pbproxystate.Endpoint{
		Address: &pbproxystate.Endpoint_HostPort{
			HostPort: &pbproxystate.HostPortAddress{
				Host: "127.0.0.1",
				Port: port.Port,
			},
		},
	}
	b.proxyStateTemplate.ProxyState.Endpoints[clusterName] = &pbproxystate.Endpoints{
		Endpoints: []*pbproxystate.Endpoint{endpoint},
	}

	return b
}

func (l *ListenerBuilder) addInboundTLS() *ListenerBuilder {
	if l.listener == nil {
		return nil
	}

	// For inbound TLS, we want to use this proxy's identity.
	workloadIdentity := l.builder.proxyStateTemplate.ProxyState.Identity.Name

	inboundTLS := &pbproxystate.TransportSocket{
		ConnectionTls: &pbproxystate.TransportSocket_InboundMesh{
			InboundMesh: &pbproxystate.InboundMeshMTLS{
				IdentityKey: workloadIdentity,
				ValidationContext: &pbproxystate.MeshInboundValidationContext{
					TrustBundlePeerNameKeys: []string{l.builder.id.Tenancy.PeerName},
				},
			},
		},
	}
	l.builder.proxyStateTemplate.RequiredLeafCertificates[workloadIdentity] = &pbproxystate.LeafCertificateRef{
		Name:      workloadIdentity,
		Namespace: l.builder.id.Tenancy.Namespace,
		Partition: l.builder.id.Tenancy.Partition,
	}

	l.builder.proxyStateTemplate.RequiredTrustBundles[l.builder.id.Tenancy.PeerName] = &pbproxystate.TrustBundleRef{
		Peer: l.builder.id.Tenancy.PeerName,
	}

	for i := range l.listener.Routers {
		l.listener.Routers[i].InboundTls = inboundTLS
	}
	return l
}
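For a sense of the output, a hedged summary of what BuildLocalApp produces for the single-TCP-port workloads in the test below, per this commit's golden files; the test helpers and workload values are the ones defined below:

	// Given ports {"port1": 8080/TCP, "port2": 20000/MESH}, the golden files show:
	//   - listener "public_listener": inbound, bound to the workload's mesh address on 20000
	//   - cluster  "local_app:port1": static endpoint group
	//   - endpoint "local_app:port1": 127.0.0.1:8080
	//   - a required leaf certificate and trust bundle keyed by the proxy identity and peer
	tmpl := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul").
		BuildLocalApp(workload).
		Build()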
@@ -0,0 +1,96 @@
package builder

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/internal/mesh/internal/types"
	"github.com/hashicorp/consul/internal/resource"
	"github.com/hashicorp/consul/internal/resource/resourcetest"
	"github.com/hashicorp/consul/internal/testing/golden"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

func TestBuildLocalApp(t *testing.T) {
	cases := map[string]struct {
		workload *pbcatalog.Workload
	}{
		"l4-single-workload-address-without-ports": {
			workload: &pbcatalog.Workload{
				Addresses: []*pbcatalog.WorkloadAddress{
					{
						Host: "10.0.0.1",
					},
				},
				Ports: map[string]*pbcatalog.WorkloadPort{
					"port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
					"port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
				},
			},
		},
		"l4-multiple-workload-addresses-without-ports": {
			workload: &pbcatalog.Workload{
				Addresses: []*pbcatalog.WorkloadAddress{
					{
						Host: "10.0.0.1",
					},
					{
						Host: "10.0.0.2",
					},
				},
				Ports: map[string]*pbcatalog.WorkloadPort{
					"port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
					"port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
				},
			},
		},
		"l4-multiple-workload-addresses-with-specific-ports": {
			workload: &pbcatalog.Workload{
				Addresses: []*pbcatalog.WorkloadAddress{
					{
						Host:  "127.0.0.1",
						Ports: []string{"port1"},
					},
					{
						Host:  "10.0.0.2",
						Ports: []string{"port2"},
					},
				},
				Ports: map[string]*pbcatalog.WorkloadPort{
					"port1": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
					"port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
				},
			},
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			proxyTmpl := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul").BuildLocalApp(c.workload).
				Build()
			actual := protoToJSON(t, proxyTmpl)
			expected := golden.Get(t, actual, name+".golden")

			require.JSONEq(t, expected, actual)
		})
	}
}

func testProxyStateTemplateID() *pbresource.ID {
	return resourcetest.Resource(types.ProxyStateTemplateType, "test").
		WithTenancy(resource.DefaultNamespacedTenancy()).
		ID()
}

func testIdentityRef() *pbresource.Reference {
	return &pbresource.Reference{
		Name: "test-identity",
		Tenancy: &pbresource.Tenancy{
			Namespace: "default",
			Partition: "default",
			PeerName:  "local",
		},
	}
}
@@ -0,0 +1,33 @@
package builder

import (
	"fmt"

	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

func DestinationClusterName(serviceRef *pbresource.Reference, datacenter, trustDomain string) string {
	return connect.ServiceSNI(serviceRef.Name,
		"",
		serviceRef.Tenancy.Namespace,
		serviceRef.Tenancy.Partition,
		datacenter,
		trustDomain)
}

func DestinationStatPrefix(serviceRef *pbresource.Reference, datacenter string) string {
	return fmt.Sprintf("upstream.%s.%s.%s.%s",
		serviceRef.Name,
		serviceRef.Tenancy.Namespace,
		serviceRef.Tenancy.Partition,
		datacenter)
}

func DestinationListenerName(name, portName string, address string, port uint32) string {
	if port != 0 {
		return fmt.Sprintf("%s:%s:%s:%d", name, portName, address, port)
	}

	return fmt.Sprintf("%s:%s:%s", name, portName, address)
}
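A few concrete values for these helpers, taken from the golden files and test fixtures in this commit; api1Ref is shorthand for the "api-1" service reference in the default partition and namespace, with "dc1" and trust domain "foo.consul":

	DestinationClusterName(api1Ref, "dc1", "foo.consul") // "api-1.default.dc1.internal.foo.consul"
	DestinationStatPrefix(api1Ref, "dc1")                // "upstream.api-1.default.default.dc1"
	DestinationListenerName("api-1", "tcp", "1.1.1.1", 1234)      // "api-1:tcp:1.1.1.1:1234"
	DestinationListenerName("api-2", "tcp", "/path/to/socket", 0) // "api-2:tcp:/path/to/socket"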
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/l4-multi-destination.golden (new file, 122 lines, vendored)
@ -0,0 +1,122 @@
|
|||
{
|
||||
"proxyState": {
|
||||
"identity": {
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
},
|
||||
"name": "test-identity"
|
||||
},
|
||||
"listeners": [
|
||||
{
|
||||
"name": "api-1:tcp:1.1.1.1:1234",
|
||||
"direction": "DIRECTION_OUTBOUND",
|
||||
"hostPort": {
|
||||
"host": "1.1.1.1",
|
||||
"port": 1234
|
||||
},
|
||||
"routers": [
|
||||
{
|
||||
"l4": {
|
||||
"name": "api-1.default.dc1.internal.foo.consul",
|
||||
"statPrefix": "upstream.api-1.default.default.dc1"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "api-2:tcp:/path/to/socket",
|
||||
"direction": "DIRECTION_OUTBOUND",
|
||||
"unixSocket": {
|
||||
"path": "/path/to/socket",
|
||||
"mode": "0666"
|
||||
},
|
||||
"routers": [
|
||||
{
|
||||
"l4": {
|
||||
"name": "api-2.default.dc1.internal.foo.consul",
|
||||
"statPrefix": "upstream.api-2.default.default.dc1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"clusters": {
|
||||
"api-1.default.dc1.internal.foo.consul": {
|
||||
"endpointGroup": {
|
||||
"dynamic": {
|
||||
"config": {
|
||||
"disablePanicThreshold": true
|
||||
},
|
||||
"outboundTls": {
|
||||
"outboundMesh": {
|
||||
"identityKey": "test-identity",
|
||||
"validationContext": {
|
||||
"spiffeIds": [
|
||||
"spiffe://foo.consul/ap/default/ns/default/identity/api1-identity"
|
||||
]
|
||||
},
|
||||
"sni": "api-1.default.dc1.internal.foo.consul"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"api-2.default.dc1.internal.foo.consul": {
|
||||
"endpointGroup": {
|
||||
"dynamic": {
|
||||
"config": {
|
||||
"disablePanicThreshold": true
|
||||
},
|
||||
"outboundTls": {
|
||||
"outboundMesh": {
|
||||
"identityKey": "test-identity",
|
||||
"validationContext": {
|
||||
"spiffeIds": [
|
||||
"spiffe://foo.consul/ap/default/ns/default/identity/api2-identity"
|
||||
]
|
||||
},
|
||||
"sni": "api-2.default.dc1.internal.foo.consul"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"requiredEndpoints": {
|
||||
"api-1.default.dc1.internal.foo.consul": {
|
||||
"id": {
|
||||
"name": "api-1",
|
||||
"type": {
|
||||
"group": "catalog",
|
||||
"groupVersion": "v1alpha1",
|
||||
"kind": "ServiceEndpoints"
|
||||
},
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
}
|
||||
},
|
||||
"port": "mesh"
|
||||
},
|
||||
"api-2.default.dc1.internal.foo.consul": {
|
||||
"id": {
|
||||
"name": "api-2",
|
||||
"type": {
|
||||
"group": "catalog",
|
||||
"groupVersion": "v1alpha1",
|
||||
"kind": "ServiceEndpoints"
|
||||
},
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
}
|
||||
},
|
||||
"port": "mesh"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
{
|
||||
"proxyState": {
|
||||
"identity": {
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
},
|
||||
"name": "test-identity"
|
||||
},
|
||||
"listeners": [
|
||||
{
|
||||
"name": "public_listener",
|
||||
"direction": "DIRECTION_INBOUND",
|
||||
"hostPort": {
|
||||
"host": "10.0.0.2",
|
||||
"port": 20000
|
||||
},
|
||||
"routers": [
|
||||
{
|
||||
"l4": {
|
||||
"name": "local_app:port1",
|
||||
"statPrefix": "public_listener"
|
||||
},
|
||||
"inboundTls": {
|
||||
"inboundMesh": {
|
||||
"identityKey": "test-identity",
|
||||
"validationContext": {
|
||||
"trustBundlePeerNameKeys": [
|
||||
"local"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"clusters": {
|
||||
"local_app:port1": {
|
||||
"endpointGroup": {
|
||||
"static": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"endpoints": {
|
||||
"local_app:port1": {
|
||||
"endpoints": [
|
||||
{
|
||||
"hostPort": {
|
||||
"host": "127.0.0.1",
|
||||
"port": 8080
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"requiredLeafCertificates": {
|
||||
"test-identity": {
|
||||
"name": "test-identity",
|
||||
"namespace": "default",
|
||||
"partition": "default"
|
||||
}
|
||||
},
|
||||
"requiredTrustBundles": {
|
||||
"local": {
|
||||
"peer": "local"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
{
|
||||
"proxyState": {
|
||||
"identity": {
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
},
|
||||
"name": "test-identity"
|
||||
},
|
||||
"listeners": [
|
||||
{
|
||||
"name": "public_listener",
|
||||
"direction": "DIRECTION_INBOUND",
|
||||
"hostPort": {
|
||||
"host": "10.0.0.1",
|
||||
"port": 20000
|
||||
},
|
||||
"routers": [
|
||||
{
|
||||
"l4": {
|
||||
"name": "local_app:port1",
|
||||
"statPrefix": "public_listener"
|
||||
},
|
||||
"inboundTls": {
|
||||
"inboundMesh": {
|
||||
"identityKey": "test-identity",
|
||||
"validationContext": {
|
||||
"trustBundlePeerNameKeys": [
|
||||
"local"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"clusters": {
|
||||
"local_app:port1": {
|
||||
"endpointGroup": {
|
||||
"static": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"endpoints": {
|
||||
"local_app:port1": {
|
||||
"endpoints": [
|
||||
{
|
||||
"hostPort": {
|
||||
"host": "127.0.0.1",
|
||||
"port": 8080
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"requiredLeafCertificates": {
|
||||
"test-identity": {
|
||||
"name": "test-identity",
|
||||
"namespace": "default",
|
||||
"partition": "default"
|
||||
}
|
||||
},
|
||||
"requiredTrustBundles": {
|
||||
"local": {
|
||||
"peer": "local"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,70 @@
|
|||
{
|
||||
"proxyState": {
|
||||
"identity": {
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
},
|
||||
"name": "test-identity"
|
||||
},
|
||||
"listeners": [
|
||||
{
|
||||
"name": "api-1:tcp:1.1.1.1:1234",
|
||||
"direction": "DIRECTION_OUTBOUND",
|
||||
"hostPort": {
|
||||
"host": "1.1.1.1",
|
||||
"port": 1234
|
||||
},
|
||||
"routers": [
|
||||
{
|
||||
"l4": {
|
||||
"name": "api-1.default.dc1.internal.foo.consul",
|
||||
"statPrefix": "upstream.api-1.default.default.dc1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"clusters": {
|
||||
"api-1.default.dc1.internal.foo.consul": {
|
||||
"endpointGroup": {
|
||||
"dynamic": {
|
||||
"config": {
|
||||
"disablePanicThreshold": true
|
||||
},
|
||||
"outboundTls": {
|
||||
"outboundMesh": {
|
||||
"identityKey": "test-identity",
|
||||
"validationContext": {
|
||||
"spiffeIds": [
|
||||
"spiffe://foo.consul/ap/default/ns/default/identity/api1-identity"
|
||||
]
|
||||
},
|
||||
"sni": "api-1.default.dc1.internal.foo.consul"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"requiredEndpoints": {
|
||||
"api-1.default.dc1.internal.foo.consul": {
|
||||
"id": {
|
||||
"name": "api-1",
|
||||
"type": {
|
||||
"group": "catalog",
|
||||
"groupVersion": "v1alpha1",
|
||||
"kind": "ServiceEndpoints"
|
||||
},
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
}
|
||||
},
|
||||
"port": "mesh"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,70 @@
|
|||
{
|
||||
"proxyState": {
|
||||
"identity": {
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
},
|
||||
"name": "test-identity"
|
||||
},
|
||||
"listeners": [
|
||||
{
|
||||
"name": "api-2:tcp:/path/to/socket",
|
||||
"direction": "DIRECTION_OUTBOUND",
|
||||
"unixSocket": {
|
||||
"path": "/path/to/socket",
|
||||
"mode": "0666"
|
||||
},
|
||||
"routers": [
|
||||
{
|
||||
"l4": {
|
||||
"name": "api-2.default.dc1.internal.foo.consul",
|
||||
"statPrefix": "upstream.api-2.default.default.dc1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"clusters": {
|
||||
"api-2.default.dc1.internal.foo.consul": {
|
||||
"endpointGroup": {
|
||||
"dynamic": {
|
||||
"config": {
|
||||
"disablePanicThreshold": true
|
||||
},
|
||||
"outboundTls": {
|
||||
"outboundMesh": {
|
||||
"identityKey": "test-identity",
|
||||
"validationContext": {
|
||||
"spiffeIds": [
|
||||
"spiffe://foo.consul/ap/default/ns/default/identity/api2-identity"
|
||||
]
|
||||
},
|
||||
"sni": "api-2.default.dc1.internal.foo.consul"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"requiredEndpoints": {
|
||||
"api-2.default.dc1.internal.foo.consul": {
|
||||
"id": {
|
||||
"name": "api-2",
|
||||
"type": {
|
||||
"group": "catalog",
|
||||
"groupVersion": "v1alpha1",
|
||||
"kind": "ServiceEndpoints"
|
||||
},
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
}
|
||||
},
|
||||
"port": "mesh"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
{
|
||||
"proxyState": {
|
||||
"identity": {
|
||||
"tenancy": {
|
||||
"partition": "default",
|
||||
"namespace": "default",
|
||||
"peerName": "local"
|
||||
},
|
||||
"name": "test-identity"
|
||||
},
|
||||
"listeners": [
|
||||
{
|
||||
"name": "public_listener",
|
||||
"direction": "DIRECTION_INBOUND",
|
||||
"hostPort": {
|
||||
"host": "10.0.0.1",
|
||||
"port": 20000
|
||||
},
|
||||
"routers": [
|
||||
{
|
||||
"l4": {
|
||||
"name": "local_app:port1",
|
||||
"statPrefix": "public_listener"
|
||||
},
|
||||
"inboundTls": {
|
||||
"inboundMesh": {
|
||||
"identityKey": "test-identity",
|
||||
"validationContext": {
|
||||
"trustBundlePeerNameKeys": [
|
||||
"local"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"clusters": {
|
||||
"local_app:port1": {
|
||||
"endpointGroup": {
|
||||
"static": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"endpoints": {
|
||||
"local_app:port1": {
|
||||
"endpoints": [
|
||||
{
|
||||
"hostPort": {
|
||||
"host": "127.0.0.1",
|
||||
"port": 8080
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"requiredLeafCertificates": {
|
||||
"test-identity": {
|
||||
"name": "test-identity",
|
||||
"namespace": "default",
|
||||
"partition": "default"
|
||||
}
|
||||
},
|
||||
"requiredTrustBundles": {
|
||||
"local": {
|
||||
"peer": "local"
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,169 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package sidecarproxy

import (
	"context"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"

	"github.com/hashicorp/consul/internal/catalog"
	"github.com/hashicorp/consul/internal/controller"
	"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
	"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder"
	"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/fetcher"
	"github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
	"github.com/hashicorp/consul/internal/mesh/internal/types"
	"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
	"github.com/hashicorp/consul/internal/resource"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// ControllerName is the name for this controller. It's used for logging or status keys.
const ControllerName = "consul.io/sidecar-proxy-controller"

type TrustDomainFetcher func() (string, error)

func Controller(cache *sidecarproxycache.Cache, mapper *sidecarproxymapper.Mapper, trustDomainFetcher TrustDomainFetcher) controller.Controller {
	if cache == nil || mapper == nil || trustDomainFetcher == nil {
		panic("cache, mapper and trust domain fetcher are required")
	}

	return controller.ForType(types.ProxyStateTemplateType).
		WithWatch(catalog.ServiceEndpointsType, mapper.MapServiceEndpointsToProxyStateTemplate).
		WithWatch(types.UpstreamsType, mapper.MapDestinationsToProxyStateTemplate).
		WithReconciler(&reconciler{cache: cache, getTrustDomain: trustDomainFetcher})
}

type reconciler struct {
	cache          *sidecarproxycache.Cache
	getTrustDomain TrustDomainFetcher
}

func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
	rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", ControllerName)

	rt.Logger.Trace("reconciling proxy state template")

	// Instantiate a data fetcher to fetch all reconciliation data.
	dataFetcher := fetcher.Fetcher{Client: rt.Client, Cache: r.cache}

	// Check if the workload exists.
	workloadID := resource.ReplaceType(catalog.WorkloadType, req.ID)
	workload, err := dataFetcher.FetchWorkload(ctx, workloadID)
	if err != nil {
		rt.Logger.Error("error reading the associated workload", "error", err)
		return err
	}
	if workload == nil {
		// If the workload has been deleted, then return, as the ProxyStateTemplate should be cleaned up
		// by the garbage collector because of the owner reference.
		rt.Logger.Trace("workload doesn't exist; skipping reconciliation", "workload", workloadID)
		return nil
	}

	proxyStateTemplate, err := dataFetcher.FetchProxyStateTemplate(ctx, req.ID)
	if err != nil {
		rt.Logger.Error("error reading proxy state template", "error", err)
		return nil
	}

	if proxyStateTemplate == nil {
		// If the proxy state template has been deleted, we will need to generate a new one.
		rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one")
	}

	if !workload.Workload.IsMeshEnabled() {
		// Skip non-mesh workloads.

		// If there's an existing proxy state template, delete it.
		if proxyStateTemplate != nil {
			rt.Logger.Trace("deleting existing proxy state template because workload is no longer on the mesh")
			_, err = rt.Client.Delete(ctx, &pbresource.DeleteRequest{Id: req.ID})
			if err != nil {
				rt.Logger.Error("error deleting existing proxy state template", "error", err)
				return err
			}

			// Remove it from the cache.
			r.cache.DeleteSourceProxy(req.ID)
		}
		rt.Logger.Trace("skipping proxy state template generation because workload is not on the mesh", "workload", workload.Resource.Id)
		return nil
	}

	// First get the trust domain.
	trustDomain, err := r.getTrustDomain()
	if err != nil {
		rt.Logger.Error("error fetching trust domain to compute proxy state template", "error", err)
		return err
	}

	b := builder.New(req.ID, workloadIdentityRefFromWorkload(workload), trustDomain).
		BuildLocalApp(workload.Workload)

	// Get all destinations data.
	destinationsRefs := r.cache.DestinationsBySourceProxy(req.ID)
	destinationsData, statuses, err := dataFetcher.FetchDestinationsData(ctx, destinationsRefs)
	if err != nil {
		rt.Logger.Error("error fetching destinations for this proxy", "error", err)
		return err
	}

	b.BuildDestinations(destinationsData)

	newProxyTemplate := b.Build()

	if proxyStateTemplate == nil || !proto.Equal(proxyStateTemplate.Tmpl, newProxyTemplate) {
		proxyTemplateData, err := anypb.New(newProxyTemplate)
		if err != nil {
			rt.Logger.Error("error creating proxy state template data", "error", err)
			return err
		}
		rt.Logger.Trace("updating proxy state template")
		_, err = rt.Client.Write(ctx, &pbresource.WriteRequest{
			Resource: &pbresource.Resource{
				Id:    req.ID,
				Owner: workload.Resource.Id,
				Data:  proxyTemplateData,
			},
		})
		if err != nil {
			rt.Logger.Error("error writing proxy state template", "error", err)
			return err
		}
	} else {
		rt.Logger.Trace("proxy state template data has not changed, skipping update")
	}

	// Update any statuses.
	for _, status := range statuses {
		updatedStatus := &pbresource.Status{
			ObservedGeneration: status.Generation,
		}
		updatedStatus.Conditions = status.Conditions
		// If the status is unchanged then we should skip the unnecessary write.
		if !resource.EqualStatus(status.OldStatus[ControllerName], updatedStatus, false) {
			rt.Logger.Trace("updating status", "id", status.ID)
			_, err = rt.Client.WriteStatus(ctx, &pbresource.WriteStatusRequest{
				Id:     status.ID,
				Key:    ControllerName,
				Status: updatedStatus,
			})
			if err != nil {
				rt.Logger.Error("error writing new status", "id", status.ID, "error", err)
				return err
			}
		}
	}
	return nil
}

func workloadIdentityRefFromWorkload(w *intermediate.Workload) *pbresource.Reference {
	return &pbresource.Reference{
		Name:    w.Workload.Identity,
		Tenancy: w.Resource.Id.Tenancy,
	}
}
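One detail worth calling out from the reconcile loop above: the ProxyStateTemplate is name-aligned with its Workload, so the reconciler only swaps the resource type to find the workload. A minimal sketch, assuming the resource.ReplaceType helper used above:

	// req.ID is a mesh ProxyStateTemplate ID such as "api-abc"; the corresponding
	// workload keeps the same name and tenancy, only the type changes.
	workloadID := resource.ReplaceType(catalog.WorkloadType, req.ID)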
@ -0,0 +1,376 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package sidecarproxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
|
||||
"github.com/hashicorp/consul/internal/catalog"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||
"github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1/pbproxystate"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/proto/private/prototest"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
type meshControllerTestSuite struct {
|
||||
suite.Suite
|
||||
|
||||
client *resourcetest.Client
|
||||
runtime controller.Runtime
|
||||
|
||||
ctl *reconciler
|
||||
ctx context.Context
|
||||
|
||||
apiWorkloadID *pbresource.ID
|
||||
apiWorkload *pbcatalog.Workload
|
||||
apiService *pbresource.Resource
|
||||
apiEndpoints *pbresource.Resource
|
||||
apiEndpointsData *pbcatalog.ServiceEndpoints
|
||||
webWorkload *pbresource.Resource
|
||||
proxyStateTemplate *pbmesh.ProxyStateTemplate
|
||||
}
|
||||
|
||||
func (suite *meshControllerTestSuite) SetupTest() {
|
||||
resourceClient := svctest.RunResourceService(suite.T(), types.Register, catalog.RegisterTypes)
|
||||
suite.client = resourcetest.NewClient(resourceClient)
|
||||
suite.runtime = controller.Runtime{Client: resourceClient, Logger: testutil.Logger(suite.T())}
|
||||
suite.ctx = testutil.TestContext(suite.T())
|
||||
|
||||
suite.ctl = &reconciler{
|
||||
cache: sidecarproxycache.New(),
|
||||
getTrustDomain: func() (string, error) {
|
||||
return "test.consul", nil
|
||||
},
|
||||
}
|
||||
|
||||
suite.apiWorkload = &pbcatalog.Workload{
|
||||
Identity: "api-identity",
|
||||
Addresses: []*pbcatalog.WorkloadAddress{
|
||||
{
|
||||
Host: "10.0.0.1",
|
||||
},
|
||||
},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
}
|
||||
|
||||
suite.apiWorkloadID = resourcetest.Resource(catalog.WorkloadType, "api-abc").
|
||||
WithData(suite.T(), suite.apiWorkload).
|
||||
Write(suite.T(), resourceClient).Id
|
||||
|
||||
suite.apiService = resourcetest.Resource(catalog.ServiceType, "api-service").
|
||||
WithData(suite.T(), &pbcatalog.Service{
|
||||
Workloads: &pbcatalog.WorkloadSelector{Names: []string{"api-abc"}},
|
||||
Ports: []*pbcatalog.ServicePort{
|
||||
{TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
}}).
|
||||
Write(suite.T(), suite.client.ResourceServiceClient)
|
||||
|
||||
suite.apiEndpointsData = &pbcatalog.ServiceEndpoints{
|
||||
Endpoints: []*pbcatalog.Endpoint{
|
||||
{
|
||||
TargetRef: suite.apiWorkloadID,
|
||||
Addresses: suite.apiWorkload.Addresses,
|
||||
Ports: suite.apiWorkload.Ports,
|
||||
Identity: "api-identity",
|
||||
},
|
||||
},
|
||||
}
|
||||
suite.apiEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-service").
|
||||
WithData(suite.T(), suite.apiEndpointsData).
|
||||
Write(suite.T(), suite.client.ResourceServiceClient)
|
||||
|
||||
webWorkloadData := &pbcatalog.Workload{
|
||||
Identity: "web-identity",
|
||||
Addresses: []*pbcatalog.WorkloadAddress{
|
||||
{
|
||||
Host: "10.0.0.2",
|
||||
},
|
||||
},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
}
|
||||
suite.webWorkload = resourcetest.Resource(catalog.WorkloadType, "web-def").
|
||||
WithData(suite.T(), webWorkloadData).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
resourcetest.Resource(catalog.ServiceType, "web").
|
||||
WithData(suite.T(), &pbcatalog.Service{
|
||||
Workloads: &pbcatalog.WorkloadSelector{Names: []string{"web-def"}},
|
||||
Ports: []*pbcatalog.ServicePort{
|
||||
{TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
{TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
}}).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
resourcetest.Resource(catalog.ServiceEndpointsType, "web").
|
||||
WithData(suite.T(), &pbcatalog.ServiceEndpoints{
|
||||
Endpoints: []*pbcatalog.Endpoint{
|
||||
{
|
||||
TargetRef: suite.webWorkload.Id,
|
||||
Addresses: webWorkloadData.Addresses,
|
||||
Ports: webWorkloadData.Ports,
|
||||
Identity: "web-identity",
|
||||
},
|
||||
},
|
||||
}).Write(suite.T(), suite.client)
|
||||
|
||||
identityRef := &pbresource.Reference{
|
||||
Name: suite.apiWorkload.Identity,
|
||||
Tenancy: suite.apiWorkloadID.Tenancy,
|
||||
}
|
||||
|
||||
suite.proxyStateTemplate = builder.New(suite.apiWorkloadID, identityRef, "test.consul").
|
||||
BuildLocalApp(suite.apiWorkload).
|
||||
Build()
|
||||
}
|
||||
|
||||
func (suite *meshControllerTestSuite) TestReconcile_NoWorkload() {
|
||||
// This test ensures that removed workloads are ignored and don't result
|
||||
// in the creation of the proxy state template.
|
||||
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
|
||||
ID: resourceID(types.ProxyStateTemplateType, "not-found"),
|
||||
})
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
suite.client.RequireResourceNotFound(suite.T(), resourceID(types.ProxyStateTemplateType, "not-found"))
|
||||
}
|
||||
|
||||
func (suite *meshControllerTestSuite) TestReconcile_NonMeshWorkload() {
|
||||
// This test ensures that non-mesh workloads are ignored by the controller.
|
||||
|
||||
nonMeshWorkload := &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{
|
||||
{
|
||||
Host: "10.0.0.1",
|
||||
},
|
||||
},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
},
|
||||
}
|
||||
|
||||
resourcetest.Resource(catalog.WorkloadType, "test-non-mesh-api-workload").
|
||||
WithData(suite.T(), nonMeshWorkload).
|
||||
Write(suite.T(), suite.client.ResourceServiceClient)
|
||||
|
||||
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
|
||||
ID: resourceID(types.ProxyStateTemplateType, "test-non-mesh-api-workload"),
|
||||
})
|
||||
|
||||
require.NoError(suite.T(), err)
|
||||
suite.client.RequireResourceNotFound(suite.T(), resourceID(types.ProxyStateTemplateType, "test-non-mesh-api-workload"))
|
||||
}
|
||||
|
||||
func (suite *meshControllerTestSuite) TestReconcile_NoExistingProxyStateTemplate() {
|
||||
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
|
||||
ID: resourceID(types.ProxyStateTemplateType, suite.apiWorkloadID.Name),
|
||||
})
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
res := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, suite.apiWorkloadID.Name))
|
||||
require.NoError(suite.T(), err)
|
||||
require.NotNil(suite.T(), res.Data)
|
||||
prototest.AssertDeepEqual(suite.T(), suite.apiWorkloadID, res.Owner)
|
||||
}
|
||||
|
||||
func (suite *meshControllerTestSuite) TestReconcile_ExistingProxyStateTemplate_WithUpdates() {
|
||||
// This test ensures that we write a new proxy state template when there are changes.
|
||||
|
||||
// Write the original.
|
||||
resourcetest.Resource(types.ProxyStateTemplateType, "api-abc").
|
||||
WithData(suite.T(), suite.proxyStateTemplate).
|
||||
WithOwner(suite.apiWorkloadID).
|
||||
Write(suite.T(), suite.client.ResourceServiceClient)
|
||||
|
||||
// Update the apiWorkload.
|
||||
suite.apiWorkload.Ports["mesh"].Port = 21000
|
||||
updatedWorkloadID := resourcetest.Resource(catalog.WorkloadType, "api-abc").
|
||||
WithData(suite.T(), suite.apiWorkload).
|
||||
Write(suite.T(), suite.client.ResourceServiceClient).Id
|
||||
|
||||
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
|
||||
ID: resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name),
|
||||
})
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
res := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name))
|
||||
require.NoError(suite.T(), err)
|
||||
require.NotNil(suite.T(), res.Data)
|
||||
prototest.AssertDeepEqual(suite.T(), updatedWorkloadID, res.Owner)
|
||||
|
||||
var updatedProxyStateTemplate pbmesh.ProxyStateTemplate
|
||||
err = res.Data.UnmarshalTo(&updatedProxyStateTemplate)
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
// Check that our value is updated in the proxy state template.
|
||||
inboundListenerPort := updatedProxyStateTemplate.ProxyState.Listeners[0].
|
||||
BindAddress.(*pbproxystate.Listener_HostPort).HostPort.Port
|
||||
require.Equal(suite.T(), uint32(21000), inboundListenerPort)
|
||||
}
|
||||
|
||||
func (suite *meshControllerTestSuite) TestReconcile_ExistingProxyStateTemplate_NoUpdates() {
|
||||
// This test ensures that we skip writing of the proxy state template when there are no changes to it.
|
||||
|
||||
// Write the original.
|
||||
originalProxyState := resourcetest.Resource(types.ProxyStateTemplateType, "api-abc").
|
||||
WithData(suite.T(), suite.proxyStateTemplate).
|
||||
WithOwner(suite.apiWorkloadID).
|
||||
Write(suite.T(), suite.client.ResourceServiceClient)
|
||||
|
||||
// Update the metadata on the apiWorkload which should result in no changes.
|
||||
updatedWorkloadID := resourcetest.Resource(catalog.WorkloadType, "api-abc").
|
||||
WithData(suite.T(), suite.apiWorkload).
|
||||
WithMeta("some", "meta").
|
||||
Write(suite.T(), suite.client.ResourceServiceClient).Id
|
||||
|
||||
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
|
||||
ID: resourceID(types.ProxyStateTemplateType, updatedWorkloadID.Name),
|
||||
})
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
updatedProxyState := suite.client.RequireResourceExists(suite.T(), resourceID(types.ProxyStateTemplateType, suite.apiWorkloadID.Name))
|
||||
resourcetest.RequireVersionUnchanged(suite.T(), updatedProxyState, originalProxyState.Version)
|
||||
}
|
||||
|
||||
func (suite *meshControllerTestSuite) TestController() {
|
||||
// This is a comprehensive test that checks the overall controller behavior as various resources change state.
|
||||
// This should test interactions between the reconciler, the mappers, and the cache to ensure they work
|
||||
// together and produce expected result.
|
||||
|
||||
// Run the controller manager
|
||||
mgr := controller.NewManager(suite.client, suite.runtime.Logger)
|
||||
c := sidecarproxycache.New()
|
||||
m := sidecarproxymapper.New(c)
|
||||
|
||||
mgr.Register(Controller(c, m, func() (string, error) {
|
||||
return "test.consul", nil
|
||||
}))
|
||||
mgr.SetRaftLeader(true)
|
||||
go mgr.Run(suite.ctx)
|
||||
|
||||
// Create proxy state template IDs to check against in this test.
|
||||
apiProxyStateTemplateID := resourcetest.Resource(types.ProxyStateTemplateType, "api-abc").ID()
|
||||
webProxyStateTemplateID := resourcetest.Resource(types.ProxyStateTemplateType, "web-def").ID()
|
||||
|
||||
// Check that proxy state template resource is generated for both the api and web workloads.
|
||||
var webProxyStateTemplate *pbresource.Resource
|
||||
retry.Run(suite.T(), func(r *retry.R) {
|
||||
suite.client.RequireResourceExists(r, apiProxyStateTemplateID)
|
||||
webProxyStateTemplate = suite.client.RequireResourceExists(r, webProxyStateTemplateID)
|
||||
})
|
||||
|
||||
// Add a source service and check that a new proxy state is generated.
|
||||
webDestinations := resourcetest.Resource(types.UpstreamsType, "web-destinations").
|
||||
WithData(suite.T(), &pbmesh.Upstreams{
|
||||
Workloads: &pbcatalog.WorkloadSelector{Names: []string{"web-def"}},
|
||||
Upstreams: []*pbmesh.Upstream{
|
||||
{
|
||||
DestinationRef: resource.Reference(suite.apiService.Id, ""),
|
||||
DestinationPort: "tcp",
|
||||
},
|
||||
},
|
||||
}).Write(suite.T(), suite.client)
|
||||
webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version)
|
||||
|
||||
// Update destination's service endpoints and workload to be non-mesh
|
||||
// and check that:
|
||||
// * api's proxy state template is deleted
|
||||
// * we get a new web proxy resource re-generated
|
||||
// * the status on Upstreams resource is updated with a validation error
|
||||
nonMeshPorts := map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
}
|
||||
|
||||
// Note: the order matters here because in reality service endpoints will only
|
||||
// be reconciled after the workload has been updated, and so we need to write the
|
||||
// workload before we write service endpoints.
|
||||
suite.runtime.Logger.Trace("test: updating api-abc workload to be non-mesh")
|
||||
resourcetest.Resource(catalog.WorkloadType, "api-abc").
|
||||
WithData(suite.T(), &pbcatalog.Workload{
|
||||
Identity: "api-identity",
|
||||
Addresses: suite.apiWorkload.Addresses,
|
||||
Ports: nonMeshPorts}).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
suite.runtime.Logger.Trace("test: updating api-service to be non-mesh")
|
||||
resourcetest.Resource(catalog.ServiceEndpointsType, "api-service").
|
||||
WithData(suite.T(), &pbcatalog.ServiceEndpoints{
|
||||
Endpoints: []*pbcatalog.Endpoint{
|
||||
{
|
||||
TargetRef: suite.apiWorkloadID,
|
||||
Addresses: suite.apiWorkload.Addresses,
|
||||
Ports: nonMeshPorts,
|
||||
Identity: "api-identity",
|
||||
},
|
||||
},
|
||||
}).
|
||||
Write(suite.T(), suite.client.ResourceServiceClient)
|
||||
|
||||
// Check that api proxy template is gone.
|
||||
retry.Run(suite.T(), func(r *retry.R) {
|
||||
suite.client.RequireResourceNotFound(r, apiProxyStateTemplateID)
|
||||
})
|
||||
|
||||
// Check status on the pbmesh.Upstreams resource.
|
||||
serviceRef := resource.ReferenceToString(resource.Reference(suite.apiService.Id, ""))
|
||||
suite.client.WaitForStatusCondition(suite.T(), webDestinations.Id, ControllerName,
|
||||
status.ConditionMeshProtocolNotFound(serviceRef))
|
||||
|
||||
// We should get a new web proxy template resource because this destination should be removed.
|
||||
webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version)
|
||||
|
||||
// Update destination's service apiEndpoints back to mesh and check that we get a new web proxy resource re-generated
|
||||
// and that the status on Upstreams resource is updated to be empty.
|
||||
resourcetest.Resource(catalog.ServiceEndpointsType, "api-service").
|
||||
WithData(suite.T(), suite.apiEndpointsData).
|
||||
Write(suite.T(), suite.client.ResourceServiceClient)
|
||||
|
||||
suite.client.WaitForStatusCondition(suite.T(), webDestinations.Id, ControllerName,
|
||||
status.ConditionMeshProtocolFound(serviceRef))
|
||||
|
||||
// We should also get a new web proxy template resource as this destination should be added again.
|
||||
webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version)
|
||||
|
||||
// Delete the proxy state template resource and check that it gets regenerated.
|
||||
_, err := suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: webProxyStateTemplateID})
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version)
|
||||
}
|
||||
|
||||
func TestMeshController(t *testing.T) {
|
||||
suite.Run(t, new(meshControllerTestSuite))
|
||||
}
|
||||
|
||||
func resourceID(rtype *pbresource.Type, name string) *pbresource.ID {
|
||||
return &pbresource.ID{
|
||||
Type: rtype,
|
||||
Tenancy: &pbresource.Tenancy{
|
||||
Partition: "default",
|
||||
Namespace: "default",
|
||||
PeerName: "local",
|
||||
},
|
||||
Name: name,
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,258 @@
package fetcher

import (
	"context"

	"github.com/hashicorp/consul/internal/catalog"
	"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
	ctrlStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
	"github.com/hashicorp/consul/internal/mesh/internal/types"
	intermediateTypes "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
	"github.com/hashicorp/consul/internal/resource"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type Fetcher struct {
	Client pbresource.ResourceServiceClient
	Cache  *sidecarproxycache.Cache
}

func (f *Fetcher) FetchWorkload(ctx context.Context, id *pbresource.ID) (*intermediateTypes.Workload, error) {
	rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{Id: id})

	switch {
	case status.Code(err) == codes.NotFound:
		// We also need to make sure to delete the associated proxy from cache.
		// We are ignoring errors from cache here as this deletion is best effort.
		f.Cache.DeleteSourceProxy(resource.ReplaceType(types.ProxyStateTemplateType, id))
		return nil, nil
	case err != nil:
		return nil, err
	}

	w := &intermediateTypes.Workload{
		Resource: rsp.Resource,
	}

	var workload pbcatalog.Workload
	err = rsp.Resource.Data.UnmarshalTo(&workload)
	if err != nil {
		return nil, resource.NewErrDataParse(&workload, err)
	}

	w.Workload = &workload
	return w, nil
}

func (f *Fetcher) FetchProxyStateTemplate(ctx context.Context, id *pbresource.ID) (*intermediateTypes.ProxyStateTemplate, error) {
	rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{Id: id})

	switch {
	case status.Code(err) == codes.NotFound:
		return nil, nil
	case err != nil:
		return nil, err
	}

	p := &intermediateTypes.ProxyStateTemplate{
		Resource: rsp.Resource,
	}

	var tmpl pbmesh.ProxyStateTemplate
	err = rsp.Resource.Data.UnmarshalTo(&tmpl)
	if err != nil {
		return nil, resource.NewErrDataParse(&tmpl, err)
	}

	p.Tmpl = &tmpl
	return p, nil
}

func (f *Fetcher) FetchServiceEndpoints(ctx context.Context, id *pbresource.ID) (*intermediateTypes.ServiceEndpoints, error) {
	rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{Id: id})

	switch {
	case status.Code(err) == codes.NotFound:
		return nil, nil
	case err != nil:
		return nil, err
	}

	se := &intermediateTypes.ServiceEndpoints{
		Resource: rsp.Resource,
	}

	var endpoints pbcatalog.ServiceEndpoints
	err = rsp.Resource.Data.UnmarshalTo(&endpoints)
	if err != nil {
		return nil, resource.NewErrDataParse(&endpoints, err)
	}

	se.Endpoints = &endpoints
	return se, nil
}

func (f *Fetcher) FetchDestinations(ctx context.Context, id *pbresource.ID) (*intermediateTypes.Destinations, error) {
	rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{Id: id})

	switch {
	case status.Code(err) == codes.NotFound:
		return nil, nil
	case err != nil:
		return nil, err
	}

	u := &intermediateTypes.Destinations{
		Resource: rsp.Resource,
	}

	var destinations pbmesh.Upstreams
	err = rsp.Resource.Data.UnmarshalTo(&destinations)
	if err != nil {
		return nil, resource.NewErrDataParse(&destinations, err)
	}

	u.Destinations = &destinations
	return u, nil
}

func (f *Fetcher) FetchDestinationsData(
	ctx context.Context,
	destinationRefs []intermediateTypes.CombinedDestinationRef,
) ([]*intermediateTypes.Destination, map[string]*intermediateTypes.Status, error) {

	var (
		destinations []*intermediateTypes.Destination
		statuses     = make(map[string]*intermediateTypes.Status)
	)

	for _, dest := range destinationRefs {
		// Fetch Destinations resource if there is one.
		us, err := f.FetchDestinations(ctx, dest.ExplicitDestinationsID)
		if err != nil {
			// If there's an error, return and force another reconcile instead of computing
			// partial proxy state.
			return nil, statuses, err
		}

		if us == nil {
			// If the Destinations resource is not found, then we should delete it from cache and continue.
			f.Cache.DeleteDestination(dest.ServiceRef, dest.Port)
			continue
		}

		d := &intermediateTypes.Destination{}
		// As the Destinations resource contains a list of destinations,
		// we need to find the one that references our service and port.
		d.Explicit = findDestination(dest.ServiceRef, dest.Port, us.Destinations)

		// Fetch ServiceEndpoints.
		serviceID := resource.IDFromReference(dest.ServiceRef)
		se, err := f.FetchServiceEndpoints(ctx, resource.ReplaceType(catalog.ServiceEndpointsType, serviceID))
		if err != nil {
			return nil, statuses, err
		}

		serviceRef := resource.ReferenceToString(dest.ServiceRef)
|
||||
upstreamsRef := resource.IDToString(us.Resource.Id)
|
||||
if se == nil {
|
||||
// If the Service Endpoints resource is not found, then we update the status of the Upstreams resource
|
||||
// but don't remove it from cache in case it comes back.
|
||||
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
|
||||
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceNotFound(serviceRef))
|
||||
continue
|
||||
} else {
|
||||
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
|
||||
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceFound(serviceRef))
|
||||
}
|
||||
|
||||
d.ServiceEndpoints = se
|
||||
|
||||
// Check if this endpoints is mesh-enabled. If not, remove it from cache and return an error.
|
||||
if !IsMeshEnabled(se.Endpoints.Endpoints[0].Ports) {
|
||||
// Add invalid status but don't remove from cache. If this state changes,
|
||||
// we want to be able to detect this change.
|
||||
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
|
||||
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolNotFound(serviceRef))
|
||||
|
||||
// This error should not cause the execution to stop, as we want to make sure that this non-mesh destination
|
||||
// gets removed from the proxy state.
|
||||
continue
|
||||
} else {
|
||||
// If everything was successful, add an empty condition so that we can remove any existing statuses.
|
||||
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
|
||||
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolFound(serviceRef))
|
||||
}
|
||||
|
||||
// No destination port should point to a port with "mesh" protocol,
|
||||
// so check if destination port has the mesh protocol and update the status.
|
||||
if se.Endpoints.Endpoints[0].Ports[dest.Port].Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
|
||||
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolDestinationPort(serviceRef, dest.Port))
|
||||
continue
|
||||
} else {
|
||||
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
|
||||
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, dest.Port))
|
||||
}
|
||||
|
||||
// Gather all identities.
|
||||
if se != nil {
|
||||
var identities []*pbresource.Reference
|
||||
for _, ep := range se.Endpoints.Endpoints {
|
||||
identities = append(identities, &pbresource.Reference{
|
||||
Name: ep.Identity,
|
||||
Tenancy: se.Resource.Id.Tenancy,
|
||||
})
|
||||
}
|
||||
d.Identities = identities
|
||||
}
|
||||
|
||||
destinations = append(destinations, d)
|
||||
}
|
||||
|
||||
return destinations, statuses, nil
|
||||
}
|
||||
|
||||
// IsMeshEnabled returns true if the workload or service endpoints port
|
||||
// contain a port with the "mesh" protocol.
|
||||
func IsMeshEnabled(ports map[string]*pbcatalog.WorkloadPort) bool {
|
||||
for _, port := range ports {
|
||||
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func findDestination(ref *pbresource.Reference, port string, destinations *pbmesh.Upstreams) *pbmesh.Upstream {
|
||||
for _, destination := range destinations.Upstreams {
|
||||
if resource.EqualReference(ref, destination.DestinationRef) &&
|
||||
port == destination.DestinationPort {
|
||||
return destination
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateStatusCondition(
|
||||
statuses map[string]*intermediateTypes.Status,
|
||||
key string,
|
||||
id *pbresource.ID,
|
||||
oldStatus map[string]*pbresource.Status,
|
||||
generation string,
|
||||
condition *pbresource.Condition) {
|
||||
if _, ok := statuses[key]; ok {
|
||||
statuses[key].Conditions = append(statuses[key].Conditions, condition)
|
||||
} else {
|
||||
statuses[key] = &intermediateTypes.Status{
|
||||
ID: id,
|
||||
Generation: generation,
|
||||
Conditions: []*pbresource.Condition{condition},
|
||||
OldStatus: oldStatus,
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,627 @@
|
|||
package fetcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
|
||||
"github.com/hashicorp/consul/internal/catalog"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
|
||||
meshStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/proto/private/prototest"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
func TestIsMeshEnabled(t *testing.T) {
|
||||
cases := map[string]struct {
|
||||
ports map[string]*pbcatalog.WorkloadPort
|
||||
exp bool
|
||||
}{
|
||||
"nil ports": {
|
||||
ports: nil,
|
||||
exp: false,
|
||||
},
|
||||
"empty ports": {
|
||||
ports: make(map[string]*pbcatalog.WorkloadPort),
|
||||
exp: false,
|
||||
},
|
||||
"no mesh ports": {
|
||||
ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"p1": {Port: 1000, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
"p2": {Port: 2000, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
},
|
||||
exp: false,
|
||||
},
|
||||
"one mesh port": {
|
||||
ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"p1": {Port: 1000, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
"p2": {Port: 2000, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
"p3": {Port: 3000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
exp: true,
|
||||
},
|
||||
"multiple mesh ports": {
|
||||
ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"p1": {Port: 1000, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||
"p2": {Port: 2000, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
"p3": {Port: 3000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
"p4": {Port: 4000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
exp: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, c := range cases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
require.Equal(t, c.exp, IsMeshEnabled(c.ports))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type dataFetcherSuite struct {
|
||||
suite.Suite
|
||||
|
||||
ctx context.Context
|
||||
client pbresource.ResourceServiceClient
|
||||
rt controller.Runtime
|
||||
|
||||
api1Service *pbresource.Resource
|
||||
api2Service *pbresource.Resource
|
||||
api1ServiceEndpoints *pbresource.Resource
|
||||
api1ServiceEndpointsData *pbcatalog.ServiceEndpoints
|
||||
api2ServiceEndpoints *pbresource.Resource
|
||||
api2ServiceEndpointsData *pbcatalog.ServiceEndpoints
|
||||
webDestinations *pbresource.Resource
|
||||
webDestinationsData *pbmesh.Upstreams
|
||||
webProxy *pbresource.Resource
|
||||
webWorkload *pbresource.Resource
|
||||
}
|
||||
|
||||
func (suite *dataFetcherSuite) SetupTest() {
|
||||
suite.ctx = testutil.TestContext(suite.T())
|
||||
suite.client = svctest.RunResourceService(suite.T(), types.Register, catalog.RegisterTypes)
|
||||
suite.rt = controller.Runtime{
|
||||
Client: suite.client,
|
||||
Logger: testutil.Logger(suite.T()),
|
||||
}
|
||||
|
||||
suite.api1Service = resourcetest.Resource(catalog.ServiceType, "api-1").
|
||||
WithData(suite.T(), &pbcatalog.Service{}).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
suite.api1ServiceEndpointsData = &pbcatalog.ServiceEndpoints{
|
||||
Endpoints: []*pbcatalog.Endpoint{
|
||||
{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
"mesh": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
Identity: "api-1-identity",
|
||||
},
|
||||
},
|
||||
}
|
||||
suite.api1ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-1").
|
||||
WithData(suite.T(), suite.api1ServiceEndpointsData).Write(suite.T(), suite.client)
|
||||
|
||||
suite.api2Service = resourcetest.Resource(catalog.ServiceType, "api-2").
|
||||
WithData(suite.T(), &pbcatalog.Service{}).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
suite.api2ServiceEndpointsData = &pbcatalog.ServiceEndpoints{
|
||||
Endpoints: []*pbcatalog.Endpoint{
|
||||
{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.2"}},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp1": {Port: 9080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
"tcp2": {Port: 9081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
Identity: "api-2-identity",
|
||||
},
|
||||
},
|
||||
}
|
||||
suite.api2ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-2").
|
||||
WithData(suite.T(), suite.api2ServiceEndpointsData).Write(suite.T(), suite.client)
|
||||
|
||||
suite.webDestinationsData = &pbmesh.Upstreams{
|
||||
Upstreams: []*pbmesh.Upstream{
|
||||
{
|
||||
DestinationRef: resource.Reference(suite.api1Service.Id, ""),
|
||||
DestinationPort: "tcp",
|
||||
},
|
||||
{
|
||||
DestinationRef: resource.Reference(suite.api2Service.Id, ""),
|
||||
DestinationPort: "tcp1",
|
||||
},
|
||||
{
|
||||
DestinationRef: resource.Reference(suite.api2Service.Id, ""),
|
||||
DestinationPort: "tcp2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
suite.webDestinations = resourcetest.Resource(types.UpstreamsType, "web-destinations").
|
||||
WithData(suite.T(), suite.webDestinationsData).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
suite.webProxy = resourcetest.Resource(types.ProxyStateTemplateType, "web-abc").
|
||||
WithData(suite.T(), &pbmesh.ProxyStateTemplate{}).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
suite.webWorkload = resourcetest.Resource(catalog.WorkloadType, "web-abc").
|
||||
WithData(suite.T(), &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.2"}},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
|
||||
}).
|
||||
Write(suite.T(), suite.client)
|
||||
}
|
||||
|
||||
func (suite *dataFetcherSuite) TestFetcher_FetchWorkload_WorkloadNotFound() {
|
||||
// Test that when workload is not found, we remove it from cache.
|
||||
|
||||
proxyID := resourcetest.Resource(types.ProxyStateTemplateType, "service-workload-abc").ID()
|
||||
|
||||
// Create cache and pre-populate it.
|
||||
c := sidecarproxycache.New()
|
||||
dest1 := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resourcetest.Resource(catalog.ServiceType, "test-service-1").ReferenceNoSection(),
|
||||
Port: "tcp",
|
||||
ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "test-servicedestinations-1").ID(),
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(proxyID): {},
|
||||
},
|
||||
}
|
||||
dest2 := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resourcetest.Resource(catalog.ServiceType, "test-service-2").ReferenceNoSection(),
|
||||
Port: "tcp",
|
||||
ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "test-servicedestinations-2").ID(),
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(proxyID): {},
|
||||
},
|
||||
}
|
||||
c.WriteDestination(dest1)
|
||||
c.WriteDestination(dest2)
|
||||
|
||||
f := Fetcher{Cache: c, Client: suite.client}
|
||||
_, err := f.FetchWorkload(context.Background(), proxyID)
|
||||
require.NoError(suite.T(), err)
|
||||
|
||||
// Check that cache is updated to remove proxy id.
|
||||
require.Nil(suite.T(), c.DestinationsBySourceProxy(proxyID))
|
||||
}
|
||||
|
||||
func (suite *dataFetcherSuite) TestFetcher_NotFound() {
|
||||
// This test checks that we ignore not found errors for various types we need to fetch.
|
||||
|
||||
f := Fetcher{
|
||||
Client: suite.client,
|
||||
}
|
||||
|
||||
cases := map[string]struct {
|
||||
typ *pbresource.Type
|
||||
fetchFunc func(id *pbresource.ID) error
|
||||
}{
|
||||
"proxy state template": {
|
||||
typ: types.ProxyStateTemplateType,
|
||||
fetchFunc: func(id *pbresource.ID) error {
|
||||
_, err := f.FetchProxyStateTemplate(context.Background(), id)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"service endpoints": {
|
||||
typ: catalog.ServiceEndpointsType,
|
||||
fetchFunc: func(id *pbresource.ID) error {
|
||||
_, err := f.FetchServiceEndpoints(context.Background(), id)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"destinations": {
|
||||
typ: types.UpstreamsType,
|
||||
fetchFunc: func(id *pbresource.ID) error {
|
||||
_, err := f.FetchDestinations(context.Background(), id)
|
||||
return err
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, c := range cases {
|
||||
suite.T().Run(name, func(t *testing.T) {
|
||||
err := c.fetchFunc(resourcetest.Resource(c.typ, "not-found").ID())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *dataFetcherSuite) TestFetcher_FetchErrors() {
|
||||
f := Fetcher{
|
||||
Client: suite.client,
|
||||
}
|
||||
|
||||
cases := map[string]struct {
|
||||
name string
|
||||
fetchFunc func(id *pbresource.ID) error
|
||||
}{
|
||||
"workload": {
|
||||
name: "web-abc",
|
||||
fetchFunc: func(id *pbresource.ID) error {
|
||||
_, err := f.FetchWorkload(context.Background(), id)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"proxy state template": {
|
||||
name: "web-abc",
|
||||
fetchFunc: func(id *pbresource.ID) error {
|
||||
_, err := f.FetchProxyStateTemplate(context.Background(), id)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"service endpoints": {
|
||||
name: "api-1",
|
||||
fetchFunc: func(id *pbresource.ID) error {
|
||||
_, err := f.FetchServiceEndpoints(context.Background(), id)
|
||||
return err
|
||||
},
|
||||
},
|
||||
"destinations": {
|
||||
name: "web-destinations",
|
||||
fetchFunc: func(id *pbresource.ID) error {
|
||||
_, err := f.FetchDestinations(context.Background(), id)
|
||||
return err
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, c := range cases {
|
||||
suite.T().Run(name+"-read", func(t *testing.T) {
|
||||
badType := &pbresource.Type{
|
||||
Group: "not",
|
||||
Kind: "found",
|
||||
GroupVersion: "vfake",
|
||||
}
|
||||
err := c.fetchFunc(resourcetest.Resource(badType, c.name).ID())
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.InvalidArgument, status.Code(err))
|
||||
})
|
||||
|
||||
suite.T().Run(name+"-unmarshal", func(t *testing.T) {
|
||||
// Create a dummy health checks type as it won't be any of the types mesh controller cares about
|
||||
resourcetest.Resource(catalog.HealthChecksType, c.name).
|
||||
WithData(suite.T(), &pbcatalog.HealthChecks{
|
||||
Workloads: &pbcatalog.WorkloadSelector{Names: []string{"web-abc"}},
|
||||
}).
|
||||
Write(suite.T(), suite.client)
|
||||
|
||||
err := c.fetchFunc(resourcetest.Resource(catalog.HealthChecksType, c.name).ID())
|
||||
require.Error(t, err)
|
||||
var parseErr resource.ErrDataParse
|
||||
require.ErrorAs(t, err, &parseErr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *dataFetcherSuite) TestFetcher_FetchDestinationsData() {
|
||||
destination1 := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resource.Reference(suite.api1Service.Id, ""),
|
||||
Port: "tcp",
|
||||
ExplicitDestinationsID: suite.webDestinations.Id,
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(suite.webProxy.Id): {},
|
||||
},
|
||||
}
|
||||
destination2 := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resource.Reference(suite.api2Service.Id, ""),
|
||||
Port: "tcp1",
|
||||
ExplicitDestinationsID: suite.webDestinations.Id,
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(suite.webProxy.Id): {},
|
||||
},
|
||||
}
|
||||
destination3 := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resource.Reference(suite.api2Service.Id, ""),
|
||||
Port: "tcp2",
|
||||
ExplicitDestinationsID: suite.webDestinations.Id,
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(suite.webProxy.Id): {},
|
||||
},
|
||||
}
|
||||
|
||||
c := sidecarproxycache.New()
|
||||
c.WriteDestination(destination1)
|
||||
c.WriteDestination(destination2)
|
||||
c.WriteDestination(destination3)
|
||||
|
||||
f := Fetcher{
|
||||
Cache: c,
|
||||
Client: suite.client,
|
||||
}
|
||||
|
||||
suite.T().Run("destinations not found", func(t *testing.T) {
|
||||
destinationRefNoDestinations := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resource.Reference(suite.api1Service.Id, ""),
|
||||
Port: "tcp",
|
||||
ExplicitDestinationsID: resourcetest.Resource(types.UpstreamsType, "not-found").ID(),
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(suite.webProxy.Id): {},
|
||||
},
|
||||
}
|
||||
c.WriteDestination(destinationRefNoDestinations)
|
||||
|
||||
destinationRefs := []intermediate.CombinedDestinationRef{destinationRefNoDestinations}
|
||||
destinations, _, err := f.FetchDestinationsData(suite.ctx, destinationRefs)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, destinations)
|
||||
_, foundDest := c.ReadDestination(destinationRefNoDestinations.ServiceRef, destinationRefNoDestinations.Port)
|
||||
require.False(t, foundDest)
|
||||
})
|
||||
|
||||
suite.T().Run("service endpoints not found", func(t *testing.T) {
|
||||
notFoundServiceRef := resourcetest.Resource(catalog.ServiceType, "not-found").
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).
|
||||
ReferenceNoSection()
|
||||
destinationNoServiceEndpoints := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: notFoundServiceRef,
|
||||
Port: "tcp",
|
||||
ExplicitDestinationsID: suite.webDestinations.Id,
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(suite.webProxy.Id): {},
|
||||
},
|
||||
}
|
||||
c.WriteDestination(destinationNoServiceEndpoints)
|
||||
|
||||
destinationRefs := []intermediate.CombinedDestinationRef{destinationNoServiceEndpoints}
|
||||
destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, destinations)
|
||||
|
||||
destinationRef := resource.IDToString(destinationNoServiceEndpoints.ExplicitDestinationsID)
|
||||
serviceRef := resource.ReferenceToString(destinationNoServiceEndpoints.ServiceRef)
|
||||
|
||||
require.Len(t, statuses[destinationRef].Conditions, 1)
|
||||
require.Equal(t, statuses[destinationRef].Conditions[0],
|
||||
meshStatus.ConditionDestinationServiceNotFound(serviceRef))
|
||||
|
||||
_, foundDest := c.ReadDestination(destinationNoServiceEndpoints.ServiceRef, destinationNoServiceEndpoints.Port)
|
||||
require.True(t, foundDest)
|
||||
})
|
||||
|
||||
suite.T().Run("service endpoints not on mesh", func(t *testing.T) {
|
||||
apiNonMeshServiceEndpointsData := &pbcatalog.ServiceEndpoints{
|
||||
Endpoints: []*pbcatalog.Endpoint{
|
||||
{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
},
|
||||
Identity: "api-1-identity",
|
||||
},
|
||||
},
|
||||
}
|
||||
apiNonMeshServiceEndpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "api-1").
|
||||
WithData(suite.T(), apiNonMeshServiceEndpointsData).Write(suite.T(), suite.client)
|
||||
destinationNonMeshServiceEndpoints := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resource.Reference(apiNonMeshServiceEndpoints.Owner, ""),
|
||||
Port: "tcp",
|
||||
ExplicitDestinationsID: suite.webDestinations.Id,
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(suite.webProxy.Id): {},
|
||||
},
|
||||
}
|
||||
c.WriteDestination(destinationNonMeshServiceEndpoints)
|
||||
|
||||
destinationRefs := []intermediate.CombinedDestinationRef{destinationNonMeshServiceEndpoints}
|
||||
destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, destinations)
|
||||
|
||||
destinationRef := resource.IDToString(destinationNonMeshServiceEndpoints.ExplicitDestinationsID)
|
||||
serviceRef := resource.ReferenceToString(destinationNonMeshServiceEndpoints.ServiceRef)
|
||||
|
||||
require.Len(t, statuses[destinationRef].Conditions, 2)
|
||||
prototest.AssertElementsMatch(t, statuses[destinationRef].Conditions,
|
||||
[]*pbresource.Condition{
|
||||
meshStatus.ConditionDestinationServiceFound(serviceRef),
|
||||
meshStatus.ConditionMeshProtocolNotFound(serviceRef),
|
||||
})
|
||||
|
||||
_, foundDest := c.ReadDestination(destinationNonMeshServiceEndpoints.ServiceRef, destinationNonMeshServiceEndpoints.Port)
|
||||
require.True(t, foundDest)
|
||||
})
|
||||
|
||||
suite.T().Run("invalid destinations: destination is not on the mesh", func(t *testing.T) {
|
||||
// Update api1 to no longer be on the mesh.
|
||||
suite.api1ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-1").
|
||||
WithData(suite.T(), &pbcatalog.ServiceEndpoints{
|
||||
Endpoints: []*pbcatalog.Endpoint{
|
||||
{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
},
|
||||
Identity: "api-1-identity",
|
||||
},
|
||||
},
|
||||
}).Write(suite.T(), suite.client)
|
||||
|
||||
destinationRefs := []intermediate.CombinedDestinationRef{destination1}
|
||||
|
||||
destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs)
|
||||
serviceRef := resource.ReferenceToString(destination1.ServiceRef)
|
||||
destinationRef := resource.IDToString(destination1.ExplicitDestinationsID)
|
||||
expectedStatus := &intermediate.Status{
|
||||
ID: suite.webDestinations.Id,
|
||||
Generation: suite.webDestinations.Generation,
|
||||
Conditions: []*pbresource.Condition{
|
||||
meshStatus.ConditionDestinationServiceFound(serviceRef),
|
||||
meshStatus.ConditionMeshProtocolNotFound(serviceRef),
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the status is generated correctly.
|
||||
prototest.AssertDeepEqual(t, expectedStatus, statuses[destinationRef])
|
||||
|
||||
// Check that we didn't return any destinations.
|
||||
require.Nil(t, destinations)
|
||||
|
||||
// Check that destination service is still in cache because it's still referenced from the pbmesh.Upstreams
|
||||
// resource.
|
||||
_, foundDest := c.ReadDestination(destination1.ServiceRef, destination1.Port)
|
||||
require.True(t, foundDest)
|
||||
|
||||
// Update the endpoints to be mesh enabled again and check that the status is now valid.
|
||||
suite.api1ServiceEndpoints = resourcetest.Resource(catalog.ServiceEndpointsType, "api-1").
|
||||
WithData(suite.T(), suite.api1ServiceEndpointsData).Write(suite.T(), suite.client)
|
||||
expectedStatus = &intermediate.Status{
|
||||
ID: suite.webDestinations.Id,
|
||||
Generation: suite.webDestinations.Generation,
|
||||
Conditions: []*pbresource.Condition{
|
||||
meshStatus.ConditionDestinationServiceFound(serviceRef),
|
||||
meshStatus.ConditionMeshProtocolFound(serviceRef),
|
||||
meshStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, destination1.Port),
|
||||
},
|
||||
}
|
||||
|
||||
_, statuses, err = f.FetchDestinationsData(suite.ctx, destinationRefs)
|
||||
require.NoError(t, err)
|
||||
prototest.AssertDeepEqual(t, expectedStatus, statuses[destinationRef])
|
||||
})
|
||||
|
||||
suite.T().Run("invalid destinations: destination is pointing to a mesh port", func(t *testing.T) {
|
||||
// Create a destination pointing to the mesh port.
|
||||
destinationMeshDestinationPort := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resource.Reference(suite.api1Service.Id, ""),
|
||||
Port: "mesh",
|
||||
ExplicitDestinationsID: suite.webDestinations.Id,
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(suite.webProxy.Id): {},
|
||||
},
|
||||
}
|
||||
c.WriteDestination(destinationMeshDestinationPort)
|
||||
destinationRefs := []intermediate.CombinedDestinationRef{destinationMeshDestinationPort}
|
||||
|
||||
destinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs)
|
||||
serviceRef := resource.ReferenceToString(destination1.ServiceRef)
|
||||
destinationRef := resource.IDToString(destination1.ExplicitDestinationsID)
|
||||
expectedStatus := &intermediate.Status{
|
||||
ID: suite.webDestinations.Id,
|
||||
Generation: suite.webDestinations.Generation,
|
||||
Conditions: []*pbresource.Condition{
|
||||
meshStatus.ConditionDestinationServiceFound(serviceRef),
|
||||
meshStatus.ConditionMeshProtocolFound(serviceRef),
|
||||
meshStatus.ConditionMeshProtocolDestinationPort(serviceRef, destinationMeshDestinationPort.Port),
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the status is generated correctly.
|
||||
prototest.AssertDeepEqual(t, expectedStatus, statuses[destinationRef])
|
||||
|
||||
// Check that we didn't return any destinations.
|
||||
require.Nil(t, destinations)
|
||||
|
||||
// Check that destination service is still in cache because it's still referenced from the pbmesh.Upstreams
|
||||
// resource.
|
||||
_, foundDest := c.ReadDestination(destinationMeshDestinationPort.ServiceRef, destinationMeshDestinationPort.Port)
|
||||
require.True(t, foundDest)
|
||||
|
||||
// Update the destination to point to a non-mesh port and check that the status is now updated.
|
||||
destinationRefs[0].Port = "tcp"
|
||||
c.WriteDestination(destinationMeshDestinationPort)
|
||||
expectedStatus = &intermediate.Status{
|
||||
ID: suite.webDestinations.Id,
|
||||
Generation: suite.webDestinations.Generation,
|
||||
Conditions: []*pbresource.Condition{
|
||||
meshStatus.ConditionDestinationServiceFound(serviceRef),
|
||||
meshStatus.ConditionMeshProtocolFound(serviceRef),
|
||||
meshStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, destinationRefs[0].Port),
|
||||
},
|
||||
}
|
||||
|
||||
_, statuses, err = f.FetchDestinationsData(suite.ctx, destinationRefs)
|
||||
require.NoError(t, err)
|
||||
prototest.AssertDeepEqual(t, expectedStatus, statuses[destinationRef])
|
||||
})
|
||||
|
||||
suite.T().Run("happy path", func(t *testing.T) {
|
||||
destinationRefs := []intermediate.CombinedDestinationRef{destination1, destination2, destination3}
|
||||
expectedDestinations := []*intermediate.Destination{
|
||||
{
|
||||
Explicit: suite.webDestinationsData.Upstreams[0],
|
||||
ServiceEndpoints: &intermediate.ServiceEndpoints{
|
||||
Resource: suite.api1ServiceEndpoints,
|
||||
Endpoints: suite.api1ServiceEndpointsData,
|
||||
},
|
||||
Identities: []*pbresource.Reference{
|
||||
{
|
||||
Name: "api-1-identity",
|
||||
Tenancy: suite.api1Service.Id.Tenancy,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Explicit: suite.webDestinationsData.Upstreams[1],
|
||||
ServiceEndpoints: &intermediate.ServiceEndpoints{
|
||||
Resource: suite.api2ServiceEndpoints,
|
||||
Endpoints: suite.api2ServiceEndpointsData,
|
||||
},
|
||||
Identities: []*pbresource.Reference{
|
||||
{
|
||||
Name: "api-2-identity",
|
||||
Tenancy: suite.api2Service.Id.Tenancy,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Explicit: suite.webDestinationsData.Upstreams[2],
|
||||
ServiceEndpoints: &intermediate.ServiceEndpoints{
|
||||
Resource: suite.api2ServiceEndpoints,
|
||||
Endpoints: suite.api2ServiceEndpointsData,
|
||||
},
|
||||
Identities: []*pbresource.Reference{
|
||||
{
|
||||
Name: "api-2-identity",
|
||||
Tenancy: suite.api2Service.Id.Tenancy,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var expectedConditions []*pbresource.Condition
|
||||
for _, d := range destinationRefs {
|
||||
ref := resource.ReferenceToString(d.ServiceRef)
|
||||
expectedConditions = append(expectedConditions,
|
||||
meshStatus.ConditionDestinationServiceFound(ref),
|
||||
meshStatus.ConditionMeshProtocolFound(ref),
|
||||
meshStatus.ConditionNonMeshProtocolDestinationPort(ref, d.Port))
|
||||
}
|
||||
|
||||
actualDestinations, statuses, err := f.FetchDestinationsData(suite.ctx, destinationRefs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that all statuses have "happy" conditions.
|
||||
dref := resource.IDToString(destination1.ExplicitDestinationsID)
|
||||
prototest.AssertElementsMatch(t, expectedConditions, statuses[dref].Conditions)
|
||||
|
||||
// Check that we've computed expanded destinations correctly.
|
||||
prototest.AssertElementsMatch(t, expectedDestinations, actualDestinations)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDataFetcher(t *testing.T) {
|
||||
suite.Run(t, new(dataFetcherSuite))
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
package status
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
const (
|
||||
StatusConditionDestinationAccepted = "DestinationAccepted"
|
||||
|
||||
StatusReasonMeshProtocolNotFound = "MeshPortProtocolNotFound"
|
||||
StatusReasonMeshProtocolFound = "MeshPortProtocolFound"
|
||||
|
||||
StatusReasonMeshProtocolDestinationPort = "DestinationWithMeshPortProtocol"
|
||||
StatusReasonNonMeshProtocolDestinationPort = "DestinationWithNonMeshPortProtocol"
|
||||
|
||||
StatusReasonDestinationServiceNotFound = "ServiceNotFound"
|
||||
StatusReasonDestinationServiceFound = "ServiceFound"
|
||||
)
|
||||
|
||||
func ConditionMeshProtocolNotFound(serviceRef string) *pbresource.Condition {
|
||||
return &pbresource.Condition{
|
||||
Type: StatusConditionDestinationAccepted,
|
||||
State: pbresource.Condition_STATE_FALSE,
|
||||
Reason: StatusReasonMeshProtocolNotFound,
|
||||
Message: fmt.Sprintf("service %q cannot be referenced as a Destination because it's not mesh-enabled.", serviceRef),
|
||||
}
|
||||
}
|
||||
|
||||
func ConditionMeshProtocolFound(serviceRef string) *pbresource.Condition {
|
||||
return &pbresource.Condition{
|
||||
Type: StatusConditionDestinationAccepted,
|
||||
State: pbresource.Condition_STATE_TRUE,
|
||||
Reason: StatusReasonMeshProtocolFound,
|
||||
Message: fmt.Sprintf("service %q is on the mesh.", serviceRef),
|
||||
}
|
||||
}
|
||||
|
||||
func ConditionDestinationServiceNotFound(serviceRef string) *pbresource.Condition {
|
||||
return &pbresource.Condition{
|
||||
Type: StatusConditionDestinationAccepted,
|
||||
State: pbresource.Condition_STATE_FALSE,
|
||||
Reason: StatusReasonDestinationServiceNotFound,
|
||||
Message: fmt.Sprintf("service %q does not exist.", serviceRef),
|
||||
}
|
||||
}
|
||||
|
||||
func ConditionDestinationServiceFound(serviceRef string) *pbresource.Condition {
|
||||
return &pbresource.Condition{
|
||||
Type: StatusConditionDestinationAccepted,
|
||||
State: pbresource.Condition_STATE_TRUE,
|
||||
Reason: StatusReasonDestinationServiceFound,
|
||||
Message: fmt.Sprintf("service %q exists.", serviceRef),
|
||||
}
|
||||
}
|
||||
|
||||
func ConditionMeshProtocolDestinationPort(serviceRef, port string) *pbresource.Condition {
|
||||
return &pbresource.Condition{
|
||||
Type: StatusConditionDestinationAccepted,
|
||||
State: pbresource.Condition_STATE_FALSE,
|
||||
Reason: StatusReasonMeshProtocolDestinationPort,
|
||||
Message: fmt.Sprintf("destination port %q for service %q has PROTOCOL_MESH which is unsupported for destination services", port, serviceRef),
|
||||
}
|
||||
}
|
||||
|
||||
func ConditionNonMeshProtocolDestinationPort(serviceRef, port string) *pbresource.Condition {
|
||||
return &pbresource.Condition{
|
||||
Type: StatusConditionDestinationAccepted,
|
||||
State: pbresource.Condition_STATE_TRUE,
|
||||
Reason: StatusReasonNonMeshProtocolDestinationPort,
|
||||
Message: fmt.Sprintf("destination port %q for service %q has a non-mesh protocol", port, serviceRef),
|
||||
}
|
||||
}
|
|
@ -0,0 +1,67 @@
|
|||
package sidecarproxymapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/consul/internal/catalog"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
func (m *Mapper) MapDestinationsToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
|
||||
var destinations pbmesh.Upstreams
|
||||
err := res.Data.UnmarshalTo(&destinations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Look up workloads for this destinations.
|
||||
sourceProxyIDs := make(map[resource.ReferenceKey]struct{})
|
||||
var result []controller.Request
|
||||
for _, prefix := range destinations.Workloads.Prefixes {
|
||||
resp, err := rt.Client.List(ctx, &pbresource.ListRequest{
|
||||
Type: catalog.WorkloadType,
|
||||
Tenancy: res.Id.Tenancy,
|
||||
NamePrefix: prefix,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, r := range resp.Resources {
|
||||
proxyID := resource.ReplaceType(types.ProxyStateTemplateType, r.Id)
|
||||
sourceProxyIDs[resource.NewReferenceKey(proxyID)] = struct{}{}
|
||||
result = append(result, controller.Request{
|
||||
ID: proxyID,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for _, name := range destinations.Workloads.Names {
|
||||
proxyID := &pbresource.ID{
|
||||
Name: name,
|
||||
Tenancy: res.Id.Tenancy,
|
||||
Type: types.ProxyStateTemplateType,
|
||||
}
|
||||
sourceProxyIDs[resource.NewReferenceKey(proxyID)] = struct{}{}
|
||||
result = append(result, controller.Request{
|
||||
ID: proxyID,
|
||||
})
|
||||
}
|
||||
|
||||
// Add this destination to cache.
|
||||
for _, destination := range destinations.Upstreams {
|
||||
destinationRef := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: destination.DestinationRef,
|
||||
Port: destination.DestinationPort,
|
||||
ExplicitDestinationsID: res.Id,
|
||||
SourceProxies: sourceProxyIDs,
|
||||
}
|
||||
m.cache.WriteDestination(destinationRef)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
|
@ -0,0 +1,99 @@
|
|||
package sidecarproxymapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
|
||||
"github.com/hashicorp/consul/internal/catalog"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||
"github.com/hashicorp/consul/proto/private/prototest"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMapDestinationsToProxyStateTemplate(t *testing.T) {
|
||||
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
|
||||
webWorkload1 := resourcetest.Resource(catalog.WorkloadType, "web-abc").
|
||||
WithData(t, &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
|
||||
}).
|
||||
Write(t, client)
|
||||
webWorkload2 := resourcetest.Resource(catalog.WorkloadType, "web-def").
|
||||
WithData(t, &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.2"}},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
|
||||
}).
|
||||
Write(t, client)
|
||||
webWorkload3 := resourcetest.Resource(catalog.WorkloadType, "non-prefix-web").
|
||||
WithData(t, &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.3"}},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
|
||||
}).
|
||||
Write(t, client)
|
||||
|
||||
webDestinationsData := &pbmesh.Upstreams{
|
||||
Workloads: &pbcatalog.WorkloadSelector{
|
||||
Names: []string{"non-prefix-web"},
|
||||
Prefixes: []string{"web"},
|
||||
},
|
||||
Upstreams: []*pbmesh.Upstream{
|
||||
{
|
||||
DestinationRef: resourcetest.Resource(catalog.ServiceType, "api-1").ReferenceNoSection(),
|
||||
DestinationPort: "tcp",
|
||||
},
|
||||
{
|
||||
DestinationRef: resourcetest.Resource(catalog.ServiceType, "api-2").ReferenceNoSection(),
|
||||
DestinationPort: "tcp1",
|
||||
},
|
||||
{
|
||||
DestinationRef: resourcetest.Resource(catalog.ServiceType, "api-2").ReferenceNoSection(),
|
||||
DestinationPort: "tcp2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
webDestinations := resourcetest.Resource(types.UpstreamsType, "web-destinations").
|
||||
WithData(t, webDestinationsData).
|
||||
Write(t, client)
|
||||
|
||||
c := sidecarproxycache.New()
|
||||
mapper := &Mapper{cache: c}
|
||||
|
||||
expRequests := []controller.Request{
|
||||
{ID: resource.ReplaceType(types.ProxyStateTemplateType, webWorkload1.Id)},
|
||||
{ID: resource.ReplaceType(types.ProxyStateTemplateType, webWorkload2.Id)},
|
||||
{ID: resource.ReplaceType(types.ProxyStateTemplateType, webWorkload3.Id)},
|
||||
}
|
||||
|
||||
requests, err := mapper.MapDestinationsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, webDestinations)
|
||||
require.NoError(t, err)
|
||||
prototest.AssertElementsMatch(t, expRequests, requests)
|
||||
|
||||
//var expDestinations []*intermediate.CombinedDestinationRef
|
||||
proxy1ID := resourcetest.Resource(types.ProxyStateTemplateType, webWorkload1.Id.Name).ID()
|
||||
proxy2ID := resourcetest.Resource(types.ProxyStateTemplateType, webWorkload2.Id.Name).ID()
|
||||
proxy3ID := resourcetest.Resource(types.ProxyStateTemplateType, webWorkload3.Id.Name).ID()
|
||||
for _, u := range webDestinationsData.Upstreams {
|
||||
expDestination := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: u.DestinationRef,
|
||||
Port: u.DestinationPort,
|
||||
ExplicitDestinationsID: webDestinations.Id,
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(proxy1ID): {},
|
||||
resource.NewReferenceKey(proxy2ID): {},
|
||||
resource.NewReferenceKey(proxy3ID): {},
|
||||
},
|
||||
}
|
||||
actualDestination, found := c.ReadDestination(u.DestinationRef, u.DestinationPort)
|
||||
require.True(t, found)
|
||||
prototest.AssertDeepEqual(t, expDestination, actualDestination)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
package sidecarproxymapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/consul/internal/catalog"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
type Mapper struct {
|
||||
cache *sidecarproxycache.Cache
|
||||
}
|
||||
|
||||
func New(c *sidecarproxycache.Cache) *Mapper {
|
||||
return &Mapper{
|
||||
cache: c,
|
||||
}
|
||||
}
|
||||
|
||||
// MapServiceEndpointsToProxyStateTemplate maps catalog.ServiceEndpoints objects to the IDs of
|
||||
// ProxyStateTemplate.
|
||||
func (m *Mapper) MapServiceEndpointsToProxyStateTemplate(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
|
||||
// This mapper needs to look up workload IDs from service endpoints and replace them with ProxyStateTemplate type.
|
||||
var serviceEndpoints pbcatalog.ServiceEndpoints
|
||||
err := res.Data.UnmarshalTo(&serviceEndpoints)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result []controller.Request
|
||||
|
||||
// First, we need to generate requests from workloads this "endpoints" points to
|
||||
// so that we can re-generate proxy state for the sidecar proxy.
|
||||
for _, endpoint := range serviceEndpoints.Endpoints {
|
||||
// Convert the reference to a workload to a ProxyStateTemplate ID.
|
||||
// Because these resources are name and tenancy aligned, we only need to change the type.
|
||||
|
||||
// Skip service endpoints without target refs. These resources would typically be created for
|
||||
// services external to Consul, and we don't need to reconcile those as they don't have
|
||||
// associated workloads.
|
||||
if endpoint.TargetRef != nil {
|
||||
id := &pbresource.ID{
|
||||
Name: endpoint.TargetRef.Name,
|
||||
Tenancy: endpoint.TargetRef.Tenancy,
|
||||
Type: types.ProxyStateTemplateType,
|
||||
}
|
||||
result = append(result, controller.Request{
|
||||
ID: id,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Look up any source proxies for this service and generate updates.
|
||||
serviceID := resource.ReplaceType(catalog.ServiceType, res.Id)
|
||||
|
||||
// Second, we need to generate requests for any proxies where this service is a destination.
|
||||
if len(serviceEndpoints.Endpoints) > 0 {
|
||||
// All port names in the endpoints object should be the same as we filter out to ports that are selected
|
||||
// by the service, and so it's sufficient to check just the first endpoint.
|
||||
for portName, port := range serviceEndpoints.Endpoints[0].Ports {
|
||||
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||
// Skip mesh ports. These should never be used as destination ports.
|
||||
continue
|
||||
}
|
||||
serviceRef := resource.Reference(serviceID, "")
|
||||
if destination, ok := m.cache.ReadDestination(serviceRef, portName); ok {
|
||||
for refKey := range destination.SourceProxies {
|
||||
result = append(result, controller.Request{ID: refKey.ToID()})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, err
|
||||
}
|
|
@ -0,0 +1,101 @@
|
|||
package sidecarproxymapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/internal/catalog"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
|
||||
"github.com/hashicorp/consul/proto/private/prototest"
|
||||
)
|
||||
|
||||
func TestMapServiceEndpointsToProxyStateTemplate(t *testing.T) {
|
||||
workload1 := resourcetest.Resource(catalog.WorkloadType, "workload-1").
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).Build()
|
||||
workload2 := resourcetest.Resource(catalog.WorkloadType, "workload-2").
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).Build()
|
||||
serviceEndpoints := resourcetest.Resource(catalog.ServiceEndpointsType, "service").
|
||||
WithData(t, &pbcatalog.ServiceEndpoints{
|
||||
Endpoints: []*pbcatalog.Endpoint{
|
||||
{
|
||||
TargetRef: workload1.Id,
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp1": {Port: 8080},
|
||||
"tcp2": {Port: 8081},
|
||||
"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
},
|
||||
{
|
||||
TargetRef: workload2.Id,
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp1": {Port: 8080},
|
||||
"tcp2": {Port: 8081},
|
||||
"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
},
|
||||
},
|
||||
}).
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).
|
||||
Build()
|
||||
proxyTmpl1ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-1").
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
|
||||
proxyTmpl2ID := resourcetest.Resource(types.ProxyStateTemplateType, "workload-2").
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
|
||||
|
||||
c := sidecarproxycache.New()
|
||||
mapper := &Mapper{cache: c}
|
||||
sourceProxy1 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-3").
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
|
||||
sourceProxy2 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-4").
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
|
||||
sourceProxy3 := resourcetest.Resource(types.ProxyStateTemplateType, "workload-5").
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
|
||||
destination1 := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resourcetest.Resource(catalog.ServiceType, "service").ReferenceNoSection(),
|
||||
Port: "tcp1",
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(sourceProxy1): {},
|
||||
resource.NewReferenceKey(sourceProxy2): {},
|
||||
},
|
||||
}
|
||||
destination2 := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resourcetest.Resource(catalog.ServiceType, "service").ReferenceNoSection(),
|
||||
Port: "tcp2",
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(sourceProxy1): {},
|
||||
resource.NewReferenceKey(sourceProxy3): {},
|
||||
},
|
||||
}
|
||||
destination3 := intermediate.CombinedDestinationRef{
|
||||
ServiceRef: resourcetest.Resource(catalog.ServiceType, "service").ReferenceNoSection(),
|
||||
Port: "mesh",
|
||||
SourceProxies: map[resource.ReferenceKey]struct{}{
|
||||
resource.NewReferenceKey(sourceProxy1): {},
|
||||
resource.NewReferenceKey(sourceProxy3): {},
|
||||
},
|
||||
}
|
||||
c.WriteDestination(destination1)
|
||||
c.WriteDestination(destination2)
|
||||
c.WriteDestination(destination3)
|
||||
|
||||
expRequests := []controller.Request{
|
||||
{ID: proxyTmpl1ID},
|
||||
{ID: proxyTmpl2ID},
|
||||
{ID: sourceProxy1},
|
||||
{ID: sourceProxy2},
|
||||
{ID: sourceProxy1},
|
||||
{ID: sourceProxy3},
|
||||
}
|
||||
|
||||
requests, err := mapper.MapServiceEndpointsToProxyStateTemplate(context.Background(), controller.Runtime{}, serviceEndpoints)
|
||||
require.NoError(t, err)
|
||||
prototest.AssertElementsMatch(t, expRequests, requests)
|
||||
}
|
|
@ -0,0 +1,63 @@
|
|||
package intermediate
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
// CombinedDestinationRef contains all references we need for a specific
|
||||
// destination on the mesh.
|
||||
type CombinedDestinationRef struct {
|
||||
// ServiceRef is the reference to the destination service.
|
||||
ServiceRef *pbresource.Reference
|
||||
|
||||
// Port is the port name for this destination.
|
||||
Port string
|
||||
|
||||
// SourceProxies are the reference keys of source proxy state template resources.
|
||||
SourceProxies map[resource.ReferenceKey]struct{}
|
||||
|
||||
// ExplicitDestinationsID is the id of the pbmesh.Upstreams resource. For implicit destinations,
|
||||
// this should be nil.
|
||||
ExplicitDestinationsID *pbresource.ID
|
||||
}
|
||||
|
||||
type ServiceEndpoints struct {
|
||||
Resource *pbresource.Resource
|
||||
Endpoints *pbcatalog.ServiceEndpoints
|
||||
}
|
||||
|
||||
type Destinations struct {
|
||||
Resource *pbresource.Resource
|
||||
Destinations *pbmesh.Upstreams
|
||||
}
|
||||
|
||||
type Workload struct {
|
||||
Resource *pbresource.Resource
|
||||
Workload *pbcatalog.Workload
|
||||
}
|
||||
|
||||
type ProxyStateTemplate struct {
|
||||
Resource *pbresource.Resource
|
||||
Tmpl *pbmesh.ProxyStateTemplate
|
||||
}
|
||||
|
||||
type ProxyConfiguration struct {
|
||||
Resource *pbresource.Resource
|
||||
Cfg *pbmesh.ProxyConfiguration
|
||||
}
|
||||
|
||||
type Destination struct {
|
||||
Explicit *pbmesh.Upstream
|
||||
ServiceEndpoints *ServiceEndpoints
|
||||
Identities []*pbresource.Reference
|
||||
}
|
||||
|
||||
type Status struct {
|
||||
ID *pbresource.ID
|
||||
Generation string
|
||||
Conditions []*pbresource.Condition
|
||||
OldStatus map[string]*pbresource.Status
|
||||
}
|
|
@ -272,6 +272,8 @@ func (m *Mapper) MapLink(_ context.Context, _ controller.Runtime, res *pbresourc
|
|||
}
|
||||
|
||||
func (m *Mapper) itemIDsByLink(link resource.ReferenceKey) []*pbresource.ID {
|
||||
// a lock must be held both to read item from the map and to read the
|
||||
// the returned items.
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
|
@ -288,6 +290,8 @@ func (m *Mapper) itemIDsByLink(link resource.ReferenceKey) []*pbresource.ID {
|
|||
}
|
||||
|
||||
func (m *Mapper) itemRefsByLink(link resource.ReferenceKey) []*pbresource.Reference {
|
||||
// a lock must be held both to read item from the map and to read the
|
||||
// the returned items.
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
|
|
|
@ -37,3 +37,11 @@ var (
|
|||
_ ReferenceOrID = (*pbresource.ID)(nil)
|
||||
_ ReferenceOrID = (*pbresource.Reference)(nil)
|
||||
)
|
||||
|
||||
func ReplaceType(typ *pbresource.Type, id *pbresource.ID) *pbresource.ID {
|
||||
return &pbresource.ID{
|
||||
Type: typ,
|
||||
Name: id.Name,
|
||||
Tenancy: id.Tenancy,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -127,6 +127,10 @@ func (b *resourceBuilder) Reference(section string) *pbresource.Reference {
|
|||
return resource.Reference(b.ID(), section)
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) ReferenceNoSection() *pbresource.Reference {
|
||||
return resource.Reference(b.ID(), "")
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) Write(t T, client pbresource.ResourceServiceClient) *pbresource.Resource {
|
||||
t.Helper()
|
||||
|
||||
|
|
|
@ -3,7 +3,9 @@ module github.com/hashicorp/consul/proto-public
|
|||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/hashicorp/consul v1.16.1
|
||||
github.com/stretchr/testify v1.8.3
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
|
||||
google.golang.org/grpc v1.55.0
|
||||
google.golang.org/protobuf v1.30.0
|
||||
)
|
||||
|
@ -11,12 +13,13 @@ require (
|
|||
require (
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/kr/pretty v0.3.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||
github.com/stretchr/objx v0.5.0 // indirect
|
||||
golang.org/x/net v0.13.0 // indirect
|
||||
golang.org/x/sys v0.10.0 // indirect
|
||||
golang.org/x/sys v0.11.0 // indirect
|
||||
golang.org/x/text v0.11.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
|
|
|
@@ -8,6 +8,9 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/hashicorp/consul v1.16.1 h1:3CeNybQgjxJ3wu2IUSi3OySn4bQ70sv1jENtLJCrklQ=
github.com/hashicorp/consul v1.16.1/go.mod h1:GH3Ybk4rNKf0wVLfwG3btwPilh+sMwGtvymgFqFRqp0=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=

@@ -30,10 +33,12 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -88,6 +88,8 @@ type Endpoint struct {
    Ports map[string]*WorkloadPort `protobuf:"bytes,3,rep,name=ports,proto3" json:"ports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    // health_status is the aggregated health status of this endpoint.
    HealthStatus Health `protobuf:"varint,4,opt,name=health_status,json=healthStatus,proto3,enum=hashicorp.consul.catalog.v1alpha1.Health" json:"health_status,omitempty"`
    // identity is the name of the workload identity for this endpoint.
    Identity string `protobuf:"bytes,5,opt,name=identity,proto3" json:"identity,omitempty"`
}

func (x *Endpoint) Reset() {

@@ -150,6 +152,13 @@ func (x *Endpoint) GetHealthStatus() Health {
    return Health_HEALTH_ANY
}

func (x *Endpoint) GetIdentity() string {
    if x != nil {
        return x.Identity
    }
    return ""
}

var File_pbcatalog_v1alpha1_service_endpoints_proto protoreflect.FileDescriptor

var file_pbcatalog_v1alpha1_service_endpoints_proto_rawDesc = []byte{

@@ -169,7 +178,7 @@ var file_pbcatalog_v1alpha1_service_endpoints_proto_rawDesc = []byte{
    0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
    0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67,
    0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69,
    0x6e, 0x74, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xa3, 0x03,
    0x6e, 0x74, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xbf, 0x03,
    0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x74, 0x61,
    0x72, 0x67, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
    0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75,

@@ -189,33 +198,35 @@ var file_pbcatalog_v1alpha1_service_endpoints_proto_rawDesc = []byte{
    0x29, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73,
    0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
    0x68, 0x61, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c,
    0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x69, 0x0a, 0x0a, 0x50, 0x6f, 0x72, 0x74,
    0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
    0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
    0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63,
    0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c,
    0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b,
    0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
    0x02, 0x38, 0x01, 0x42, 0xb2, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68,
    0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74,
    0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x15, 0x53,
    0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x50,
    0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
    0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e,
    0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
    0x2f, 0x70, 0x62, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70,
    0x68, 0x61, 0x31, 0x3b, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x76, 0x31, 0x61, 0x6c, 0x70,
    0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x43, 0xaa, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68,
    0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x43, 0x61, 0x74,
    0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x21,
    0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e,
    0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e,
    0x74, 0x69, 0x74, 0x79, 0x1a, 0x69, 0x0a, 0x0a, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74,
    0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
    0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
    0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e,
    0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
    0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64,
    0x50, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42,
    0xb2, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
    0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67,
    0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69,
    0x63, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f,
    0x50, 0x01, 0x5a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68,
    0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f,
    0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x63,
    0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b,
    0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2,
    0x02, 0x03, 0x48, 0x43, 0x43, 0xaa, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
    0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67,
    0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68,
    0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x43, 0x61, 0x74,
    0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x2d,
    0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
    0x5c, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
    0x31, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f,
    0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x31, 0x61,
    0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
    0x61, 0xea, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43,
    0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x3a, 0x3a,
    0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x24,
    0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75,
    0x6c, 0x3a, 0x3a, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c,
    0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (

@@ -29,4 +29,7 @@ message Endpoint {

  // health_status is the aggregated health status of this endpoint.
  Health health_status = 4;

  // identity is the name of the workload identity for this endpoint.
  string identity = 5;
}

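Not part of this change, but as a hedged sketch of why the new field is useful: a consumer such as the sidecar-proxy controller can collect the workload identities backing a service straight from ServiceEndpoints instead of fetching each Workload. The function name and package alias below are illustrative:

package main

import (
    "fmt"

    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
)

// identitiesFromEndpoints returns the distinct, non-empty workload identities
// recorded on a ServiceEndpoints resource.
func identitiesFromEndpoints(se *pbcatalog.ServiceEndpoints) []string {
    seen := make(map[string]struct{})
    var identities []string
    for _, ep := range se.GetEndpoints() {
        id := ep.GetIdentity()
        if id == "" {
            continue
        }
        if _, ok := seen[id]; ok {
            continue
        }
        seen[id] = struct{}{}
        identities = append(identities, id)
    }
    return identities
}

func main() {
    se := &pbcatalog.ServiceEndpoints{
        Endpoints: []*pbcatalog.Endpoint{
            {Identity: "api"},
            {Identity: "api"},
            {Identity: "web"},
        },
    }
    fmt.Println(identitiesFromEndpoints(se)) // [api web]
}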
@@ -0,0 +1,40 @@
package catalogv1alpha1

import "golang.org/x/exp/slices"

func (w *Workload) GetMeshPortName() (string, bool) {
    var meshPort string

    for portName, port := range w.Ports {
        if port.Protocol == Protocol_PROTOCOL_MESH {
            meshPort = portName
            return meshPort, true
        }
    }

    return "", false
}

func (w *Workload) IsMeshEnabled() bool {
    _, ok := w.GetMeshPortName()
    return ok
}

func (w *Workload) GetNonExternalAddressesForPort(portName string) []*WorkloadAddress {
    var addresses []*WorkloadAddress

    for _, address := range w.Addresses {
        if address.External {
            // Skip external addresses.
            continue
        }

        // If there are no ports, that means this port is selected.
        // Otherwise, check if the port is explicitly selected by this address
        if len(address.Ports) == 0 || slices.Contains(address.Ports, portName) {
            addresses = append(addresses, address)
        }
    }

    return addresses
}

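For illustration only, a hedged sketch of how these helpers compose when a caller (for example the sidecar-proxy controller) needs the mesh-capable addresses of a workload; the helper name and the host:port formatting are assumptions, not part of the commit:

package main

import (
    "fmt"

    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
)

// meshAddresses lists the non-external host:port pairs that expose the
// workload's mesh port, or nil when the workload has no mesh port.
func meshAddresses(w *pbcatalog.Workload) []string {
    portName, ok := w.GetMeshPortName()
    if !ok {
        return nil
    }
    port := w.Ports[portName].Port

    var out []string
    for _, addr := range w.GetNonExternalAddressesForPort(portName) {
        out = append(out, fmt.Sprintf("%s:%d", addr.Host, port))
    }
    return out
}

func main() {
    w := &pbcatalog.Workload{
        Addresses: []*pbcatalog.WorkloadAddress{
            {Host: "10.0.0.1"},
            {Host: "203.0.113.7", External: true},
        },
        Ports: map[string]*pbcatalog.WorkloadPort{
            "mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
            "http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
        },
    }
    fmt.Println(meshAddresses(w)) // [10.0.0.1:20000]
}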
@@ -0,0 +1,104 @@
package catalogv1alpha1

import (
    "testing"

    "github.com/stretchr/testify/require"
)

func TestGetMeshPort(t *testing.T) {
    cases := map[string]struct {
        ports map[string]*WorkloadPort
        exp   string
    }{
        "nil ports": {
            ports: nil,
            exp:   "",
        },
        "empty ports": {
            ports: make(map[string]*WorkloadPort),
            exp:   "",
        },
        "no mesh ports": {
            ports: map[string]*WorkloadPort{
                "p1": {Port: 1000, Protocol: Protocol_PROTOCOL_HTTP},
                "p2": {Port: 2000, Protocol: Protocol_PROTOCOL_TCP},
            },
            exp: "",
        },
        "one mesh port": {
            ports: map[string]*WorkloadPort{
                "p1": {Port: 1000, Protocol: Protocol_PROTOCOL_HTTP},
                "p2": {Port: 2000, Protocol: Protocol_PROTOCOL_TCP},
                "p3": {Port: 3000, Protocol: Protocol_PROTOCOL_MESH},
            },
            exp: "p3",
        },
    }

    for name, c := range cases {
        t.Run(name, func(t *testing.T) {
            workload := Workload{
                Ports: c.ports,
            }
            meshPort, ok := workload.GetMeshPortName()
            if c.exp != "" {
                require.True(t, ok)
                require.Equal(t, c.exp, meshPort)
            }
        })
    }
}

func TestGetAddressesForPort(t *testing.T) {
    cases := map[string]struct {
        addresses    []*WorkloadAddress
        portName     string
        expAddresses []*WorkloadAddress
    }{
        "empty addresses": {
            addresses:    nil,
            portName:     "doesn't matter",
            expAddresses: nil,
        },
        "addresses without selected port": {
            addresses:    []*WorkloadAddress{{Host: "1.1.1.1"}},
            portName:     "not-found",
            expAddresses: nil,
        },
        "single selected addresses": {
            addresses: []*WorkloadAddress{
                {Host: "1.1.1.1", Ports: []string{"p1", "p2"}},
                {Host: "2.2.2.2", Ports: []string{"p3", "p4"}},
            },
            portName: "p1",
            expAddresses: []*WorkloadAddress{
                {Host: "1.1.1.1", Ports: []string{"p1", "p2"}},
            },
        },
        "multiple selected addresses": {
            addresses: []*WorkloadAddress{
                {Host: "1.1.1.1", Ports: []string{"p1", "p2"}},
                {Host: "2.2.2.2", Ports: []string{"p3", "p4"}},
                {Host: "3.3.3.3"},
                {Host: "3.3.3.3", Ports: []string{"p1"}, External: true},
            },
            portName: "p1",
            expAddresses: []*WorkloadAddress{
                {Host: "1.1.1.1", Ports: []string{"p1", "p2"}},
                {Host: "3.3.3.3"},
            },
        },
    }

    for name, c := range cases {
        t.Run(name, func(t *testing.T) {
            workload := Workload{
                Addresses: c.addresses,
            }

            actualAddresses := workload.GetNonExternalAddressesForPort(c.portName)
            require.Equal(t, actualAddresses, c.expAddresses)
        })
    }
}

@@ -9,7 +9,7 @@ require (
    github.com/docker/go-connections v0.4.0
    github.com/evanphx/json-patch v4.12.0+incompatible
    github.com/go-jose/go-jose/v3 v3.0.0
    github.com/hashicorp/consul v0.0.0-00010101000000-000000000000
    github.com/hashicorp/consul v1.16.1
    github.com/hashicorp/consul/api v1.24.0
    github.com/hashicorp/consul/envoyextensions v0.4.1
    github.com/hashicorp/consul/proto-public v0.4.1

@@ -197,7 +197,7 @@ require (
    go.opentelemetry.io/proto/otlp v0.19.0 // indirect
    go.uber.org/atomic v1.9.0 // indirect
    golang.org/x/crypto v0.12.0 // indirect
    golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b // indirect
    golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect
    golang.org/x/net v0.14.0 // indirect
    golang.org/x/oauth2 v0.8.0 // indirect
    golang.org/x/sync v0.3.0 // indirect

@@ -205,7 +205,7 @@ require (
    golang.org/x/term v0.11.0 // indirect
    golang.org/x/text v0.12.0 // indirect
    golang.org/x/time v0.3.0 // indirect
    golang.org/x/tools v0.11.1 // indirect
    golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect
    google.golang.org/api v0.126.0 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e // indirect

@@ -835,8 +835,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

@@ -1107,8 +1107,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.11.1 h1:ojD5zOW8+7dOGzdnNgersm8aPfcDjhMp12UfG93NIMc=
golang.org/x/tools v0.11.1/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -12,7 +12,7 @@ import (
    libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"

    "github.com/hashicorp/consul/internal/catalog/catalogtest"
    pbresource "github.com/hashicorp/consul/proto-public/pbresource"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

func TestCatalog(t *testing.T) {