mirror of https://github.com/status-im/consul.git
added node health resource (#19803)
This commit is contained in:
parent 65c06f67e6
commit 7936e55807
@@ -4,7 +4,8 @@ flowchart TD
    auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/workloadidentity
    catalog/v2beta1/failoverpolicy --> catalog/v2beta1/service
    catalog/v2beta1/healthstatus
-   catalog/v2beta1/node --> catalog/v2beta1/healthstatus
+   catalog/v2beta1/node --> catalog/v2beta1/nodehealthstatus
+   catalog/v2beta1/nodehealthstatus
    catalog/v2beta1/service
    catalog/v2beta1/serviceendpoints --> catalog/v2beta1/service
    catalog/v2beta1/serviceendpoints --> catalog/v2beta1/workload
@@ -3,11 +3,10 @@
  "type": {
    "group": "catalog",
    "group_version": "v2beta1",
-    "kind": "HealthStatus"
+    "kind": "NodeHealthStatus"
  },
  "tenancy": {
    "partition": "default",
-    "namespace": "default",
    "peer_name": "local"
  },
  "name": "node-1-health"
@@ -25,7 +24,7 @@
    "name": "node-1"
  },
  "data": {
-    "@type": "hashicorp.consul.catalog.v2beta1.HealthStatus",
+    "@type": "hashicorp.consul.catalog.v2beta1.NodeHealthStatus",
    "type": "synthetic",
    "status": "HEALTH_PASSING"
  }
@@ -3,11 +3,10 @@
  "type": {
    "group": "catalog",
    "group_version": "v2beta1",
-    "kind": "HealthStatus"
+    "kind": "NodeHealthStatus"
  },
  "tenancy": {
    "partition": "default",
-    "namespace": "default",
    "peer_name": "local"
  },
  "name": "node-2-health"
@@ -25,7 +24,7 @@
    "name": "node-2"
  },
  "data": {
-    "@type": "hashicorp.consul.catalog.v2beta1.HealthStatus",
+    "@type": "hashicorp.consul.catalog.v2beta1.NodeHealthStatus",
    "type": "synthetic",
    "status": "HEALTH_WARNING"
  }
@@ -3,11 +3,10 @@
  "type": {
    "group": "catalog",
    "group_version": "v2beta1",
-    "kind": "HealthStatus"
+    "kind": "NodeHealthStatus"
  },
  "tenancy": {
    "partition": "default",
-    "namespace": "default",
    "peer_name": "local"
  },
  "name": "node-3-health"
@@ -25,7 +24,7 @@
    "name": "node-3"
  },
  "data": {
-    "@type": "hashicorp.consul.catalog.v2beta1.HealthStatus",
+    "@type": "hashicorp.consul.catalog.v2beta1.NodeHealthStatus",
    "type": "synthetic",
    "status": "HEALTH_CRITICAL"
  }
@@ -3,11 +3,10 @@
  "type": {
    "group": "catalog",
    "group_version": "v2beta1",
-    "kind": "HealthStatus"
+    "kind": "NodeHealthStatus"
  },
  "tenancy": {
    "partition": "default",
-    "namespace": "default",
    "peer_name": "local"
  },
  "name": "node-4-health"
@@ -25,7 +24,7 @@
    "name": "node-4"
  },
  "data": {
-    "@type": "hashicorp.consul.catalog.v2beta1.HealthStatus",
+    "@type": "hashicorp.consul.catalog.v2beta1.NodeHealthStatus",
    "type": "synthetic",
    "status": "HEALTH_MAINTENANCE"
  }
@@ -71,7 +71,7 @@ func VerifyCatalogV2Beta1IntegrationTestResults(t *testing.T, client pbresource.
		nodeId := rtest.Resource(pbcatalog.NodeType, fmt.Sprintf("node-%d", i)).WithTenancy(resource.DefaultPartitionedTenancy()).ID()
		c.RequireResourceExists(t, nodeId)

-		res := c.RequireResourceExists(t, rtest.Resource(pbcatalog.HealthStatusType, fmt.Sprintf("node-%d-health", i)).ID())
+		res := c.RequireResourceExists(t, rtest.Resource(pbcatalog.NodeHealthStatusType, fmt.Sprintf("node-%d-health", i)).ID())
		rtest.RequireOwner(t, res, nodeId, true)
	}
@@ -88,7 +88,7 @@ func RunCatalogV2Beta1NodeLifecycleIntegrationTest(t *testing.T, client pbresour
	// reconciliation at each point
	for _, health := range healthChanges {
		// update the health check
-		nodeHealth = setHealthStatus(t, c, node.Id, nodeHealthName, health)
+		nodeHealth = setNodeHealthStatus(t, c, node.Id, nodeHealthName, health)

		// wait for reconciliation to kick in and put the node into the right
		// health status.
@@ -108,7 +108,7 @@ func RunCatalogV2Beta1NodeLifecycleIntegrationTest(t *testing.T, client pbresour
	// Add the health status back once more, the actual status doesn't matter.
	// It just must be owned by the node so that we can show cascading
	// deletions of owned health statuses working.
-	healthStatus := setHealthStatus(t, c, node.Id, nodeHealthName, pbcatalog.Health_HEALTH_CRITICAL)
+	healthStatus := setNodeHealthStatus(t, c, node.Id, nodeHealthName, pbcatalog.Health_HEALTH_CRITICAL)

	// Delete the node and wait for the health status to be deleted.
	c.MustDelete(t, node.Id)
@@ -263,8 +263,8 @@ func runV2Beta1NodeAssociatedWorkloadLifecycleIntegrationTest(t *testing.T, c *r
	// Set some non-passing health statuses for those nodes. Using non-passing will make
	// it easy to see that changing a passing workloads node association appropriately
	// impacts the overall workload health.
-	setHealthStatus(t, c, node1.Id, nodeHealthName1, pbcatalog.Health_HEALTH_CRITICAL)
-	setHealthStatus(t, c, node2.Id, nodeHealthName2, pbcatalog.Health_HEALTH_WARNING)
+	setNodeHealthStatus(t, c, node1.Id, nodeHealthName1, pbcatalog.Health_HEALTH_CRITICAL)
+	setNodeHealthStatus(t, c, node2.Id, nodeHealthName2, pbcatalog.Health_HEALTH_WARNING)

	// Add the workload but don't immediately associate with any node.
	workload := rtest.Resource(pbcatalog.WorkloadType, workloadName).
@@ -337,7 +337,7 @@ func runV2Beta1NodeAssociatedWorkloadLifecycleIntegrationTest(t *testing.T, c *r
		Write(t, c)

	// Also set node 1 health down to WARNING
-	setHealthStatus(t, c, node1.Id, nodeHealthName1, pbcatalog.Health_HEALTH_WARNING)
+	setNodeHealthStatus(t, c, node1.Id, nodeHealthName1, pbcatalog.Health_HEALTH_WARNING)

	// Wait for the workload health controller to mark the workload as warning (due to node 1 having warning health now)
	c.WaitForStatusCondition(t, workload.Id,
@@ -718,3 +718,13 @@ func setHealthStatus(t *testing.T, client *rtest.Client, owner *pbresource.ID, n
		WithOwner(owner).
		Write(t, client)
}
+
+func setNodeHealthStatus(t *testing.T, client *rtest.Client, owner *pbresource.ID, name string, health pbcatalog.Health) *pbresource.Resource {
+	return rtest.Resource(pbcatalog.NodeHealthStatusType, name).
+		WithData(t, &pbcatalog.NodeHealthStatus{
+			Type:   "synthetic",
+			Status: health,
+		}).
+		WithOwner(owner).
+		Write(t, client)
+}
@@ -18,7 +18,7 @@ import (

func NodeHealthController() controller.Controller {
	return controller.ForType(pbcatalog.NodeType).
-		WithWatch(pbcatalog.HealthStatusType, controller.MapOwnerFiltered(pbcatalog.NodeType)).
+		WithWatch(pbcatalog.NodeHealthStatusType, controller.MapOwnerFiltered(pbcatalog.NodeType)).
		WithReconciler(&nodeHealthReconciler{})
}

@@ -89,8 +89,8 @@ func getNodeHealth(ctx context.Context, rt controller.Runtime, nodeRef *pbresour
	health := pbcatalog.Health_HEALTH_PASSING

	for _, res := range rsp.Resources {
-		if resource.EqualType(res.Id.Type, pbcatalog.HealthStatusType) {
-			var hs pbcatalog.HealthStatus
+		if resource.EqualType(res.Id.Type, pbcatalog.NodeHealthStatusType) {
+			var hs pbcatalog.NodeHealthStatus
			if err := res.Data.UnmarshalTo(&hs); err != nil {
				// This should be impossible as the resource service + type validations the
				// catalog is performing will ensure that no data gets written where unmarshalling
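The watch above means every write of a NodeHealthStatus owned by a Node re-queues that Node, and getNodeHealth then folds the owned statuses into a single value. Below is a minimal sketch of that fold, assuming larger Health enum values are "worse" (HEALTH_PASSING=1 through HEALTH_MAINTENANCE=4, as in the generated enum later in this diff); worstNodeHealth is an illustrative name, not a function from this commit.

package example

import (
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
)

// worstNodeHealth sketches the aggregation idea: start from HEALTH_PASSING and
// let any owned NodeHealthStatus drag the node's health down.
func worstNodeHealth(statuses []*pbcatalog.NodeHealthStatus) pbcatalog.Health {
	health := pbcatalog.Health_HEALTH_PASSING
	for _, hs := range statuses {
		// A numerically larger Health value is treated as worse.
		if hs.Status > health {
			health = hs.Status
		}
	}
	return health
}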
@@ -363,10 +363,12 @@ func (suite *nodeHealthControllerTestSuite) TestController() {
	// wait for rereconciliation to happen
	suite.waitForReconciliation(suite.nodePassing, "HEALTH_PASSING")

-	resourcetest.Resource(pbcatalog.HealthStatusType, "failure").
-		WithData(suite.T(), &pbcatalog.HealthStatus{Type: "fake", Status: pbcatalog.Health_HEALTH_CRITICAL}).
+	resourcetest.Resource(pbcatalog.NodeHealthStatusType, "failure").
+		WithData(suite.T(), &pbcatalog.NodeHealthStatus{Type: "fake", Status: pbcatalog.Health_HEALTH_CRITICAL}).
		WithOwner(suite.nodePassing).
-		WithTenancy(tenancy).
+		WithTenancy(&pbresource.Tenancy{
+			Partition: tenancy.Partition,
+		}).
		Write(suite.T(), suite.resourceClient)

	suite.waitForReconciliation(suite.nodePassing, "HEALTH_CRITICAL")
@@ -415,8 +417,8 @@ func (suite *nodeHealthControllerTestSuite) setupNodesWithTenancy(tenancy *pbres
	for _, node := range []*pbresource.ID{suite.nodePassing, suite.nodeWarning, suite.nodeCritical, suite.nodeMaintenance} {
		for idx, health := range precedenceHealth {
			if nodeHealthDesiredStatus[node.Name] >= health {
-				resourcetest.Resource(pbcatalog.HealthStatusType, fmt.Sprintf("test-check-%s-%d-%s-%s", node.Name, idx, tenancy.Partition, tenancy.Namespace)).
-					WithData(suite.T(), &pbcatalog.HealthStatus{Type: "tcp", Status: health}).
+				resourcetest.Resource(pbcatalog.NodeHealthStatusType, fmt.Sprintf("test-check-%s-%d-%s", node.Name, idx, tenancy.Partition)).
+					WithData(suite.T(), &pbcatalog.NodeHealthStatus{Type: "tcp", Status: health}).
					WithOwner(node).
					Write(suite.T(), suite.resourceClient)
			}
@@ -425,7 +427,7 @@ func (suite *nodeHealthControllerTestSuite) setupNodesWithTenancy(tenancy *pbres

	// create a DNSPolicy to be owned by the node. The type doesn't really matter it just needs
	// to be something that doesn't care about its owner. All we want to prove is that we are
-	// filtering out non-HealthStatus types appropriately.
+	// filtering out non-NodeHealthStatus types appropriately.
	resourcetest.Resource(pbcatalog.DNSPolicyType, "test-policy-"+tenancy.Partition+"-"+tenancy.Namespace).
		WithData(suite.T(), dnsPolicyData).
		WithOwner(suite.nodeNoHealth).
@@ -64,7 +64,7 @@ func validateHealthStatus(res *DecodedHealthStatus) error {
			Name:    "owner",
			Wrapped: resource.ErrMissing,
		})
-	} else if !resource.EqualType(res.Owner.Type, pbcatalog.WorkloadType) && !resource.EqualType(res.Owner.Type, pbcatalog.NodeType) {
+	} else if !resource.EqualType(res.Owner.Type, pbcatalog.WorkloadType) {
		err = multierror.Append(err, resource.ErrOwnerTypeInvalid{ResourceType: res.Id.Type, OwnerType: res.Owner.Type})
	}

@@ -77,11 +77,6 @@ func aclReadHookHealthStatus(authorizer acl.Authorizer, authzContext *acl.Author
		return authorizer.ToAllowAuthorizer().ServiceReadAllowed(res.GetOwner().GetName(), authzContext)
	}

-	// For a health status of a node we need to check node:read perms.
-	if res.GetOwner() != nil && resource.EqualType(res.GetOwner().GetType(), pbcatalog.NodeType) {
-		return authorizer.ToAllowAuthorizer().NodeReadAllowed(res.GetOwner().GetName(), authzContext)
-	}
-
	return acl.PermissionDenied("cannot read catalog.HealthStatus because there is no owner")
}

@@ -91,10 +86,5 @@ func aclWriteHookHealthStatus(authorizer acl.Authorizer, authzContext *acl.Autho
		return authorizer.ToAllowAuthorizer().ServiceWriteAllowed(res.GetOwner().GetName(), authzContext)
	}

-	// For a health status of a node we need to check node:write perms.
-	if res.GetOwner() != nil && resource.EqualType(res.GetOwner().GetType(), pbcatalog.NodeType) {
-		return authorizer.ToAllowAuthorizer().NodeWriteAllowed(res.GetOwner().GetName(), authzContext)
-	}
-
	return acl.PermissionDenied("cannot write catalog.HealthStatus because there is no owner")
}
@@ -70,15 +70,6 @@ func TestValidateHealthStatus_Ok(t *testing.T) {
				Name: "foo-workload",
			},
		},
-		"node-owned": {
-			owner: &pbresource.ID{
-				Type: pbcatalog.NodeType,
-				Tenancy: &pbresource.Tenancy{
-					Partition: defaultHealthStatusOwnerTenancy.Partition,
-				},
-				Name: "bar-node",
-			},
-		},
	}

	for name, tcase := range cases {
@@ -223,7 +214,6 @@ func TestHealthStatusACLs(t *testing.T) {
	Register(registry)

	workload := resourcetest.Resource(pbcatalog.WorkloadType, "test").ID()
-	node := resourcetest.Resource(pbcatalog.NodeType, "test").ID()

	healthStatusData := &pbcatalog.HealthStatus{
		Type: "tcp",
@@ -258,42 +248,6 @@ func TestHealthStatusACLs(t *testing.T) {
			WriteOK: resourcetest.ALLOW,
			ListOK: resourcetest.DEFAULT,
		},
-		"service test read with node owner": {
-			Rules: `service "test" { policy = "read" }`,
-			Data: healthStatusData,
-			Owner: node,
-			Typ: pbcatalog.HealthStatusType,
-			ReadOK: resourcetest.DENY,
-			WriteOK: resourcetest.DENY,
-			ListOK: resourcetest.DEFAULT,
-		},
-		"service test write with node owner": {
-			Rules: `service "test" { policy = "write" }`,
-			Data: healthStatusData,
-			Owner: node,
-			Typ: pbcatalog.HealthStatusType,
-			ReadOK: resourcetest.DENY,
-			WriteOK: resourcetest.DENY,
-			ListOK: resourcetest.DEFAULT,
-		},
-		"node test read with node owner": {
-			Rules: `node "test" { policy = "read" }`,
-			Data: healthStatusData,
-			Owner: node,
-			Typ: pbcatalog.HealthStatusType,
-			ReadOK: resourcetest.ALLOW,
-			WriteOK: resourcetest.DENY,
-			ListOK: resourcetest.DEFAULT,
-		},
-		"node test write with node owner": {
-			Rules: `node "test" { policy = "write" }`,
-			Data: healthStatusData,
-			Owner: node,
-			Typ: pbcatalog.HealthStatusType,
-			ReadOK: resourcetest.ALLOW,
-			WriteOK: resourcetest.ALLOW,
-			ListOK: resourcetest.DEFAULT,
-		},
		"node test read with workload owner": {
			Rules: `node "test" { policy = "read" }`,
			Data: healthStatusData,
@@ -0,0 +1,90 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package types

import (
	"github.com/hashicorp/go-multierror"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/internal/resource"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

type DecodedNodeHealthStatus = resource.DecodedResource[*pbcatalog.NodeHealthStatus]

func RegisterNodeHealthStatus(r resource.Registry) {
	r.Register(resource.Registration{
		Type:     pbcatalog.NodeHealthStatusType,
		Proto:    &pbcatalog.NodeHealthStatus{},
		Scope:    resource.ScopePartition,
		Validate: ValidateNodeHealthStatus,
		ACLs: &resource.ACLHooks{
			Read:  resource.AuthorizeReadWithResource(aclReadHookNodeHealthStatus),
			Write: aclWriteHookNodeHealthStatus,
			List:  resource.NoOpACLListHook,
		},
	})
}

var ValidateNodeHealthStatus = resource.DecodeAndValidate(validateNodeHealthStatus)

func validateNodeHealthStatus(res *DecodedNodeHealthStatus) error {
	var err error

	// Should we allow empty types? I think for now it will be safest to require
	// the type field is set and we can relax this restriction in the future
	// if we deem it desirable.
	if res.Data.Type == "" {
		err = multierror.Append(err, resource.ErrInvalidField{
			Name:    "type",
			Wrapped: resource.ErrMissing,
		})
	}

	switch res.Data.Status {
	case pbcatalog.Health_HEALTH_PASSING,
		pbcatalog.Health_HEALTH_WARNING,
		pbcatalog.Health_HEALTH_CRITICAL,
		pbcatalog.Health_HEALTH_MAINTENANCE:
	default:
		err = multierror.Append(err, resource.ErrInvalidField{
			Name:    "status",
			Wrapped: errInvalidHealth,
		})
	}

	// Ensure that the NodeHealthStatus' owner is a type that we want to allow. The
	// owner is currently the resource that this NodeHealthStatus applies to. If we
	// change this to be a parent reference within the NodeHealthStatus.Data then
	// we could allow for other owners.
	if res.Resource.Owner == nil {
		err = multierror.Append(err, resource.ErrInvalidField{
			Name:    "owner",
			Wrapped: resource.ErrMissing,
		})
	} else if !resource.EqualType(res.Owner.Type, pbcatalog.NodeType) {
		err = multierror.Append(err, resource.ErrOwnerTypeInvalid{ResourceType: res.Id.Type, OwnerType: res.Owner.Type})
	}

	return err
}

func aclReadHookNodeHealthStatus(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, res *pbresource.Resource) error {
	// For a health status of a node we need to check node:read perms.
	if res.GetOwner() != nil && resource.EqualType(res.GetOwner().GetType(), pbcatalog.NodeType) {
		return authorizer.ToAllowAuthorizer().NodeReadAllowed(res.GetOwner().GetName(), authzContext)
	}

	return acl.PermissionDenied("cannot read catalog.NodeHealthStatus because there is no owner")
}

func aclWriteHookNodeHealthStatus(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, res *pbresource.Resource) error {
	// For a health status of a node we need to check node:write perms.
	if res.GetOwner() != nil && resource.EqualType(res.GetOwner().GetType(), pbcatalog.NodeType) {
		return authorizer.ToAllowAuthorizer().NodeWriteAllowed(res.GetOwner().GetName(), authzContext)
	}

	return acl.PermissionDenied("cannot write catalog.NodeHealthStatus because there is no owner")
}
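For context, writing one of these resources from a test mirrors the setNodeHealthStatus helper added earlier in this commit: the only owner type the validator accepts is a catalog Node. A hedged sketch follows; the helper name, package name, and the "node-1-health" resource name are illustrative only.

package catalogtest

import (
	"testing"

	rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// writeNodeHealth writes a NodeHealthStatus owned by the given Node. With any
// other owner type, ValidateNodeHealthStatus returns ErrOwnerTypeInvalid.
func writeNodeHealth(t *testing.T, client *rtest.Client, node *pbresource.ID, health pbcatalog.Health) *pbresource.Resource {
	return rtest.Resource(pbcatalog.NodeHealthStatusType, "node-1-health").
		WithData(t, &pbcatalog.NodeHealthStatus{
			Type:   "synthetic",
			Status: health,
		}).
		WithOwner(node). // owner must be a pbcatalog.NodeType resource
		Write(t, client)
}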
@@ -0,0 +1,273 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package types

import (
	"testing"

	"github.com/hashicorp/consul/internal/resource"
	"github.com/hashicorp/consul/internal/resource/resourcetest"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/anypb"
)

var (
	defaultNodeHealthStatusOwnerTenancy = &pbresource.Tenancy{
		Partition: "default",
		PeerName: "local",
	}

	defaultNodeHealthStatusOwner = &pbresource.ID{
		Type: pbcatalog.NodeType,
		Tenancy: defaultNodeHealthStatusOwnerTenancy,
		Name: "foo",
	}
)

func createNodeHealthStatusResource(t *testing.T, data protoreflect.ProtoMessage, owner *pbresource.ID) *pbresource.Resource {
	res := &pbresource.Resource{
		Id: &pbresource.ID{
			Type: pbcatalog.NodeHealthStatusType,
			Tenancy: &pbresource.Tenancy{
				Partition: "default",
				PeerName: "local",
			},
			Name: "test-status",
		},
		Owner: owner,
	}

	var err error
	res.Data, err = anypb.New(data)
	require.NoError(t, err)
	return res
}

func TestValidateNodeHealthStatus_Ok(t *testing.T) {
	data := &pbcatalog.NodeHealthStatus{
		Type: "tcp",
		Status: pbcatalog.Health_HEALTH_PASSING,
		Description: "Doesn't matter as this is user settable",
		Output: "Health check executors are free to use this field",
	}

	type testCase struct {
		owner *pbresource.ID
	}

	cases := map[string]testCase{
		"node-owned": {
			owner: &pbresource.ID{
				Type: pbcatalog.NodeType,
				Tenancy: defaultNodeHealthStatusOwnerTenancy,
				Name: "bar-node",
			},
		},
	}

	for name, tcase := range cases {
		t.Run(name, func(t *testing.T) {
			res := createNodeHealthStatusResource(t, data, tcase.owner)
			err := ValidateNodeHealthStatus(res)
			require.NoError(t, err)
		})
	}
}

func TestValidateNodeHealthStatus_ParseError(t *testing.T) {
	// Any type other than the NodeHealthStatus type would work
	// to cause the error we are expecting
	data := &pbcatalog.IP{Address: "198.18.0.1"}

	res := createNodeHealthStatusResource(t, data, defaultNodeHealthStatusOwner)

	err := ValidateNodeHealthStatus(res)
	require.Error(t, err)
	require.ErrorAs(t, err, &resource.ErrDataParse{})
}

func TestValidateNodeHealthStatus_InvalidHealth(t *testing.T) {
	// while this is a valid enum value it is not allowed to be used
	// as the Status field.
	data := &pbcatalog.NodeHealthStatus{
		Type: "tcp",
		Status: pbcatalog.Health_HEALTH_ANY,
	}

	res := createNodeHealthStatusResource(t, data, defaultNodeHealthStatusOwner)

	err := ValidateNodeHealthStatus(res)
	require.Error(t, err)
	expected := resource.ErrInvalidField{
		Name: "status",
		Wrapped: errInvalidHealth,
	}
	var actual resource.ErrInvalidField
	require.ErrorAs(t, err, &actual)
	require.Equal(t, expected, actual)
}

func TestValidateNodeHealthStatus_MissingType(t *testing.T) {
	data := &pbcatalog.NodeHealthStatus{
		Status: pbcatalog.Health_HEALTH_PASSING,
	}

	res := createNodeHealthStatusResource(t, data, defaultNodeHealthStatusOwner)

	err := ValidateNodeHealthStatus(res)
	require.Error(t, err)
	expected := resource.ErrInvalidField{
		Name: "type",
		Wrapped: resource.ErrMissing,
	}
	var actual resource.ErrInvalidField
	require.ErrorAs(t, err, &actual)
	require.Equal(t, expected, actual)
}

func TestValidateNodeHealthStatus_MissingOwner(t *testing.T) {
	data := &pbcatalog.NodeHealthStatus{
		Type: "tcp",
		Status: pbcatalog.Health_HEALTH_PASSING,
	}

	res := createNodeHealthStatusResource(t, data, nil)

	err := ValidateNodeHealthStatus(res)
	require.Error(t, err)
	expected := resource.ErrInvalidField{
		Name: "owner",
		Wrapped: resource.ErrMissing,
	}
	var actual resource.ErrInvalidField
	require.ErrorAs(t, err, &actual)
	require.Equal(t, expected, actual)
}

func TestValidateNodeHealthStatus_InvalidOwner(t *testing.T) {
	data := &pbcatalog.NodeHealthStatus{
		Type: "tcp",
		Status: pbcatalog.Health_HEALTH_PASSING,
	}

	type testCase struct {
		owner *pbresource.ID
	}

	cases := map[string]testCase{
		"group-mismatch": {
			owner: &pbresource.ID{
				Type: &pbresource.Type{
					Group: "fake",
					GroupVersion: pbcatalog.Version,
					Kind: pbcatalog.NodeKind,
				},
				Tenancy: defaultNodeHealthStatusOwnerTenancy,
				Name: "baz",
			},
		},
		"group-version-mismatch": {
			owner: &pbresource.ID{
				Type: &pbresource.Type{
					Group: pbcatalog.GroupName,
					GroupVersion: "v99",
					Kind: pbcatalog.NodeKind,
				},
				Tenancy: defaultNodeHealthStatusOwnerTenancy,
				Name: "baz",
			},
		},
		"kind-mismatch": {
			owner: &pbresource.ID{
				Type: pbcatalog.ServiceType,
				Tenancy: defaultNodeHealthStatusOwnerTenancy,
				Name: "baz",
			},
		},
	}

	for name, tcase := range cases {
		t.Run(name, func(t *testing.T) {
			res := createNodeHealthStatusResource(t, data, tcase.owner)
			err := ValidateNodeHealthStatus(res)
			require.Error(t, err)
			expected := resource.ErrOwnerTypeInvalid{
				ResourceType: pbcatalog.NodeHealthStatusType,
				OwnerType: tcase.owner.Type,
			}
			var actual resource.ErrOwnerTypeInvalid
			require.ErrorAs(t, err, &actual)
			require.Equal(t, expected, actual)
		})
	}
}

func TestNodeHealthStatusACLs(t *testing.T) {
	registry := resource.NewRegistry()
	Register(registry)

	node := resourcetest.Resource(pbcatalog.NodeType, "test").ID()

	nodehealthStatusData := &pbcatalog.NodeHealthStatus{
		Type: "tcp",
		Status: pbcatalog.Health_HEALTH_PASSING,
	}

	cases := map[string]resourcetest.ACLTestCase{
		"no rules": {
			Rules: ``,
			Data: nodehealthStatusData,
			Owner: node,
			Typ: pbcatalog.NodeHealthStatusType,
			ReadOK: resourcetest.DENY,
			WriteOK: resourcetest.DENY,
			ListOK: resourcetest.DEFAULT,
		},
		"service test read with node owner": {
			Rules: `service "test" { policy = "read" }`,
			Data: nodehealthStatusData,
			Owner: node,
			Typ: pbcatalog.NodeHealthStatusType,
			ReadOK: resourcetest.DENY,
			WriteOK: resourcetest.DENY,
			ListOK: resourcetest.DEFAULT,
		},
		"service test write with node owner": {
			Rules: `service "test" { policy = "write" }`,
			Data: nodehealthStatusData,
			Owner: node,
			Typ: pbcatalog.NodeHealthStatusType,
			ReadOK: resourcetest.DENY,
			WriteOK: resourcetest.DENY,
			ListOK: resourcetest.DEFAULT,
		},
		"node test read with node owner": {
			Rules: `node "test" { policy = "read" }`,
			Data: nodehealthStatusData,
			Owner: node,
			Typ: pbcatalog.NodeHealthStatusType,
			ReadOK: resourcetest.ALLOW,
			WriteOK: resourcetest.DENY,
			ListOK: resourcetest.DEFAULT,
		},
		"node test write with node owner": {
			Rules: `node "test" { policy = "write" }`,
			Data: nodehealthStatusData,
			Owner: node,
			Typ: pbcatalog.NodeHealthStatusType,
			ReadOK: resourcetest.ALLOW,
			WriteOK: resourcetest.ALLOW,
			ListOK: resourcetest.DEFAULT,
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			resourcetest.RunACLTestCase(t, tc, registry)
		})
	}
}
@@ -14,7 +14,7 @@ func Register(r resource.Registry) {
	RegisterNode(r)
	RegisterHealthStatus(r)
	RegisterFailoverPolicy(r)
-
+	RegisterNodeHealthStatus(r)
	// todo (v2): re-register once these resources are implemented.
	//RegisterHealthChecks(r)
	//RegisterDNSPolicy(r)
@@ -96,3 +96,13 @@ func (msg *CheckTLSConfig) MarshalBinary() ([]byte, error) {
func (msg *CheckTLSConfig) UnmarshalBinary(b []byte) error {
	return proto.Unmarshal(b, msg)
}
+
+// MarshalBinary implements encoding.BinaryMarshaler
+func (msg *NodeHealthStatus) MarshalBinary() ([]byte, error) {
+	return proto.Marshal(msg)
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler
+func (msg *NodeHealthStatus) UnmarshalBinary(b []byte) error {
+	return proto.Unmarshal(b, msg)
+}
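These generated methods delegate straight to proto.Marshal and proto.Unmarshal, so a round trip through them is lossless. A small illustrative sketch (roundTrip is not part of the commit):

package example

import (
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
)

// roundTrip encodes a NodeHealthStatus with the generated BinaryMarshaler and
// decodes it back with the generated BinaryUnmarshaler.
func roundTrip(in *pbcatalog.NodeHealthStatus) (*pbcatalog.NodeHealthStatus, error) {
	raw, err := in.MarshalBinary()
	if err != nil {
		return nil, err
	}
	out := &pbcatalog.NodeHealthStatus{}
	err = out.UnmarshalBinary(raw)
	return out, err
}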
@@ -83,7 +83,7 @@ func (Health) EnumDescriptor() ([]byte, []int) {
	return file_pbcatalog_v2beta1_health_proto_rawDescGZIP(), []int{0}
}

-// This resource will belong to a workload or a node and will have an ownership relationship.
+// This resource will belong to a workload and will have an ownership relationship.
type HealthStatus struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
@@ -718,6 +718,82 @@ func (x *CheckTLSConfig) GetUseTls() bool {
	return false
}

+// This resource will belong to a node and will have an ownership relationship.
+type NodeHealthStatus struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Type is the type of this health check, such as http, tcp, or kubernetes-readiness
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// Health is the status. This maps to existing health check statuses.
+	Status Health `protobuf:"varint,2,opt,name=status,proto3,enum=hashicorp.consul.catalog.v2beta1.Health" json:"status,omitempty"`
+	// Description is the description for this status.
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// Output is the output from running the check that resulted in this status
+	Output string `protobuf:"bytes,4,opt,name=output,proto3" json:"output,omitempty"`
+}
+
+func (x *NodeHealthStatus) Reset() {
+	*x = NodeHealthStatus{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_pbcatalog_v2beta1_health_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NodeHealthStatus) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NodeHealthStatus) ProtoMessage() {}
+
+func (x *NodeHealthStatus) ProtoReflect() protoreflect.Message {
+	mi := &file_pbcatalog_v2beta1_health_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NodeHealthStatus.ProtoReflect.Descriptor instead.
+func (*NodeHealthStatus) Descriptor() ([]byte, []int) {
+	return file_pbcatalog_v2beta1_health_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *NodeHealthStatus) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *NodeHealthStatus) GetStatus() Health {
+	if x != nil {
+		return x.Status
+	}
+	return Health_HEALTH_ANY
+}
+
+func (x *NodeHealthStatus) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *NodeHealthStatus) GetOutput() string {
+	if x != nil {
+		return x.Output
+	}
+	return ""
+}
+
var File_pbcatalog_v2beta1_health_proto protoreflect.FileDescriptor

var file_pbcatalog_v2beta1_health_proto_rawDesc = []byte{
@@ -832,33 +908,44 @@ var file_pbcatalog_v2beta1_health_proto_rawDesc = []byte{
	0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66,
	0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x6c, 0x73, 0x53, 0x6b, 0x69, 0x70,
	0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6c,
	0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, 0x65, 0x54, 0x6c, 0x73, 0x2a,
	0x6d, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x0e, 0x0a, 0x0a, 0x48, 0x45, 0x41,
	0x4c, 0x54, 0x48, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x48, 0x45, 0x41,
	0x4c, 0x54, 0x48, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x12, 0x0a,
	0x0e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10,
	0x02, 0x12, 0x13, 0x0a, 0x0f, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x43, 0x52, 0x49, 0x54,
	0x49, 0x43, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48,
	0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x04, 0x42, 0xa1,
	0x02, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
	0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e,
	0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x49, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
	0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e,
	0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
	0x2f, 0x70, 0x62, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x32, 0x62, 0x65, 0x74,
	0x61, 0x31, 0x3b, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61,
	0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x43, 0xaa, 0x02, 0x20, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63,
	0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c,
	0x6f, 0x67, 0x2e, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x20, 0x48, 0x61, 0x73,
	0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, 0x65, 0x54, 0x6c, 0x73, 0x22,
	0xaa, 0x01, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74,
	0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74,
	0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69,
	0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61,
	0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c,
	0x74, 0x68, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
	0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06,
	0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75,
	0x74, 0x70, 0x75, 0x74, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, 0x02, 0x2a, 0x6d, 0x0a, 0x06,
	0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x0e, 0x0a, 0x0a, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48,
	0x5f, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48,
	0x5f, 0x50, 0x41, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x48, 0x45,
	0x41, 0x4c, 0x54, 0x48, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13,
	0x0a, 0x0f, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41,
	0x4c, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x4d, 0x41,
	0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x04, 0x42, 0xa1, 0x02, 0x0a, 0x24,
	0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f,
	0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x62,
	0x65, 0x74, 0x61, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74,
	0x6f, 0x50, 0x01, 0x5a, 0x49, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
	0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62,
	0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b,
	0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xa2, 0x02,
	0x03, 0x48, 0x43, 0x43, 0xaa, 0x02, 0x20, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
	0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e,
	0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x20, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63,
	0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x43, 0x61, 0x74, 0x61, 0x6c,
	0x6f, 0x67, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xe2, 0x02, 0x2c, 0x48, 0x61, 0x73,
	0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x43, 0x61,
	0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xe2, 0x02, 0x2c,
	0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
	0x5c, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31,
	0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x23, 0x48,
	0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c,
	0x3a, 0x3a, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x65, 0x74,
	0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
	0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x5c, 0x47, 0x50,
	0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x23, 0x48, 0x61, 0x73, 0x68,
	0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x43,
	0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62,
	0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
@@ -874,7 +961,7 @@ func file_pbcatalog_v2beta1_health_proto_rawDescGZIP() []byte {
}

var file_pbcatalog_v2beta1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_pbcatalog_v2beta1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
+var file_pbcatalog_v2beta1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_pbcatalog_v2beta1_health_proto_goTypes = []interface{}{
	(Health)(0), // 0: hashicorp.consul.catalog.v2beta1.Health
	(*HealthStatus)(nil), // 1: hashicorp.consul.catalog.v2beta1.HealthStatus
@@ -886,30 +973,32 @@ var file_pbcatalog_v2beta1_health_proto_goTypes = []interface{}{
	(*GRPCCheck)(nil), // 7: hashicorp.consul.catalog.v2beta1.GRPCCheck
	(*OSServiceCheck)(nil), // 8: hashicorp.consul.catalog.v2beta1.OSServiceCheck
	(*CheckTLSConfig)(nil), // 9: hashicorp.consul.catalog.v2beta1.CheckTLSConfig
-	nil, // 10: hashicorp.consul.catalog.v2beta1.HTTPCheck.HeaderEntry
-	(*WorkloadSelector)(nil), // 11: hashicorp.consul.catalog.v2beta1.WorkloadSelector
-	(*durationpb.Duration)(nil), // 12: google.protobuf.Duration
+	(*NodeHealthStatus)(nil), // 10: hashicorp.consul.catalog.v2beta1.NodeHealthStatus
+	nil, // 11: hashicorp.consul.catalog.v2beta1.HTTPCheck.HeaderEntry
+	(*WorkloadSelector)(nil), // 12: hashicorp.consul.catalog.v2beta1.WorkloadSelector
+	(*durationpb.Duration)(nil), // 13: google.protobuf.Duration
}
var file_pbcatalog_v2beta1_health_proto_depIdxs = []int32{
	0, // 0: hashicorp.consul.catalog.v2beta1.HealthStatus.status:type_name -> hashicorp.consul.catalog.v2beta1.Health
-	11, // 1: hashicorp.consul.catalog.v2beta1.HealthChecks.workloads:type_name -> hashicorp.consul.catalog.v2beta1.WorkloadSelector
+	12, // 1: hashicorp.consul.catalog.v2beta1.HealthChecks.workloads:type_name -> hashicorp.consul.catalog.v2beta1.WorkloadSelector
	3, // 2: hashicorp.consul.catalog.v2beta1.HealthChecks.health_checks:type_name -> hashicorp.consul.catalog.v2beta1.HealthCheck
	4, // 3: hashicorp.consul.catalog.v2beta1.HealthCheck.http:type_name -> hashicorp.consul.catalog.v2beta1.HTTPCheck
	5, // 4: hashicorp.consul.catalog.v2beta1.HealthCheck.tcp:type_name -> hashicorp.consul.catalog.v2beta1.TCPCheck
	6, // 5: hashicorp.consul.catalog.v2beta1.HealthCheck.udp:type_name -> hashicorp.consul.catalog.v2beta1.UDPCheck
	7, // 6: hashicorp.consul.catalog.v2beta1.HealthCheck.grpc:type_name -> hashicorp.consul.catalog.v2beta1.GRPCCheck
	8, // 7: hashicorp.consul.catalog.v2beta1.HealthCheck.os_service:type_name -> hashicorp.consul.catalog.v2beta1.OSServiceCheck
-	12, // 8: hashicorp.consul.catalog.v2beta1.HealthCheck.interval:type_name -> google.protobuf.Duration
-	12, // 9: hashicorp.consul.catalog.v2beta1.HealthCheck.timeout:type_name -> google.protobuf.Duration
-	12, // 10: hashicorp.consul.catalog.v2beta1.HealthCheck.deregister_critical_after:type_name -> google.protobuf.Duration
-	10, // 11: hashicorp.consul.catalog.v2beta1.HTTPCheck.header:type_name -> hashicorp.consul.catalog.v2beta1.HTTPCheck.HeaderEntry
+	13, // 8: hashicorp.consul.catalog.v2beta1.HealthCheck.interval:type_name -> google.protobuf.Duration
+	13, // 9: hashicorp.consul.catalog.v2beta1.HealthCheck.timeout:type_name -> google.protobuf.Duration
+	13, // 10: hashicorp.consul.catalog.v2beta1.HealthCheck.deregister_critical_after:type_name -> google.protobuf.Duration
+	11, // 11: hashicorp.consul.catalog.v2beta1.HTTPCheck.header:type_name -> hashicorp.consul.catalog.v2beta1.HTTPCheck.HeaderEntry
	9, // 12: hashicorp.consul.catalog.v2beta1.HTTPCheck.tls:type_name -> hashicorp.consul.catalog.v2beta1.CheckTLSConfig
	9, // 13: hashicorp.consul.catalog.v2beta1.GRPCCheck.tls:type_name -> hashicorp.consul.catalog.v2beta1.CheckTLSConfig
-	14, // [14:14] is the sub-list for method output_type
-	14, // [14:14] is the sub-list for method input_type
-	14, // [14:14] is the sub-list for extension type_name
-	14, // [14:14] is the sub-list for extension extendee
-	0, // [0:14] is the sub-list for field type_name
+	0, // 14: hashicorp.consul.catalog.v2beta1.NodeHealthStatus.status:type_name -> hashicorp.consul.catalog.v2beta1.Health
+	15, // [15:15] is the sub-list for method output_type
+	15, // [15:15] is the sub-list for method input_type
+	15, // [15:15] is the sub-list for extension type_name
+	15, // [15:15] is the sub-list for extension extendee
+	0, // [0:15] is the sub-list for field type_name
}

func init() { file_pbcatalog_v2beta1_health_proto_init() }
@@ -1027,6 +1116,18 @@ func file_pbcatalog_v2beta1_health_proto_init() {
				return nil
			}
		}
+		file_pbcatalog_v2beta1_health_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*NodeHealthStatus); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
	}
	file_pbcatalog_v2beta1_health_proto_msgTypes[2].OneofWrappers = []interface{}{
		(*HealthCheck_Http)(nil),
@@ -1041,7 +1142,7 @@ func file_pbcatalog_v2beta1_health_proto_init() {
		GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
		RawDescriptor: file_pbcatalog_v2beta1_health_proto_rawDesc,
		NumEnums: 1,
-		NumMessages: 10,
+		NumMessages: 11,
		NumExtensions: 0,
		NumServices: 0,
	},
@@ -9,7 +9,7 @@ import "google/protobuf/duration.proto";
import "pbcatalog/v2beta1/selector.proto";
import "pbresource/annotations.proto";

-// This resource will belong to a workload or a node and will have an ownership relationship.
+// This resource will belong to a workload and will have an ownership relationship.
message HealthStatus {
  option (hashicorp.consul.resource.spec) = {scope: SCOPE_NAMESPACE};
@@ -89,3 +89,17 @@ message CheckTLSConfig {
  bool tls_skip_verify = 2;
  bool use_tls = 3;
}
+
+// This resource will belong to a node and will have an ownership relationship.
+message NodeHealthStatus {
+  option (hashicorp.consul.resource.spec) = {scope: SCOPE_PARTITION};
+
+  // Type is the type of this health check, such as http, tcp, or kubernetes-readiness
+  string type = 1;
+  // Health is the status. This maps to existing health check statuses.
+  Health status = 2;
+  // Description is the description for this status.
+  string description = 3;
+  // Output is the output from running the check that resulted in this status
+  string output = 4;
+}
@@ -193,3 +193,24 @@ func (in *CheckTLSConfig) DeepCopy() *CheckTLSConfig {
func (in *CheckTLSConfig) DeepCopyInterface() interface{} {
	return in.DeepCopy()
}
+
+// DeepCopyInto supports using NodeHealthStatus within kubernetes types, where deepcopy-gen is used.
+func (in *NodeHealthStatus) DeepCopyInto(out *NodeHealthStatus) {
+	proto.Reset(out)
+	proto.Merge(out, proto.Clone(in))
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeHealthStatus. Required by controller-gen.
+func (in *NodeHealthStatus) DeepCopy() *NodeHealthStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeHealthStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new NodeHealthStatus. Required by controller-gen.
+func (in *NodeHealthStatus) DeepCopyInterface() interface{} {
+	return in.DeepCopy()
+}
@@ -104,6 +104,17 @@ func (this *CheckTLSConfig) UnmarshalJSON(b []byte) error {
	return HealthUnmarshaler.Unmarshal(b, this)
}
+
+// MarshalJSON is a custom marshaler for NodeHealthStatus
+func (this *NodeHealthStatus) MarshalJSON() ([]byte, error) {
+	str, err := HealthMarshaler.Marshal(this)
+	return []byte(str), err
+}
+
+// UnmarshalJSON is a custom unmarshaler for NodeHealthStatus
+func (this *NodeHealthStatus) UnmarshalJSON(b []byte) error {
+	return HealthUnmarshaler.Unmarshal(b, this)
+}

var (
	HealthMarshaler = &protojson.MarshalOptions{}
	HealthUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false}
@@ -15,6 +15,7 @@ const (
	HealthChecksKind = "HealthChecks"
	HealthStatusKind = "HealthStatus"
	NodeKind = "Node"
+	NodeHealthStatusKind = "NodeHealthStatus"
	ServiceKind = "Service"
	ServiceEndpointsKind = "ServiceEndpoints"
	VirtualIPsKind = "VirtualIPs"
@@ -52,6 +53,12 @@
		Kind: NodeKind,
	}

+	NodeHealthStatusType = &pbresource.Type{
+		Group: GroupName,
+		GroupVersion: Version,
+		Kind: NodeHealthStatusKind,
+	}
+
	ServiceType = &pbresource.Type{
		Group: GroupName,
		GroupVersion: Version,
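With the new kind and type constant in place, an ID for a NodeHealthStatus carries partition-scoped tenancy (partition and peer, no namespace), matching the registration's resource.ScopePartition and the JSON fixtures earlier in this diff. A hypothetical helper for illustration only:

package example

import (
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// nodeHealthStatusID builds an ID for the new partition-scoped type.
func nodeHealthStatusID(partition, name string) *pbresource.ID {
	return &pbresource.ID{
		Type: pbcatalog.NodeHealthStatusType,
		Tenancy: &pbresource.Tenancy{
			Partition: partition,
			PeerName:  "local",
		},
		Name: name,
	}
}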
@@ -343,16 +343,7 @@ func (s *Sprawl) registerCatalogNode(
	node *topology.Node,
) error {
	if node.IsV2() {
-
-		// TODO(rb): nodes are optional in v2 and won't be used in k8s by
-		// default. There are some scoping issues with the Node Type in 1.17 so
-		// disable it for now.
-		//
-		// To re-enable you also need to link it to the Workload by setting the
-		// NodeName field.
-		//
-		// return s.registerCatalogNodeV2(cluster, node)
-		return nil
+		return s.registerCatalogNodeV2(cluster, node)
	}
	return s.registerCatalogNodeV1(cluster, node)
}
@@ -382,7 +373,6 @@ func (s *Sprawl) registerCatalogNodeV2(
			Name: node.PodName(),
			Tenancy: &pbresource.Tenancy{
				Partition: node.Partition,
-				Namespace: "default", // temporary requirement
			},
		},
		Metadata: map[string]string{
@@ -723,8 +713,7 @@ func workloadInstanceToResources(
			Metadata: wrk.Meta,
		},
		Data: &pbcatalog.Workload{
-			// TODO(rb): disabling this until node scoping makes sense again
-			// NodeName: node.PodName(),
+			NodeName: node.PodName(),
			Identity: wrk.WorkloadIdentity,
			Ports: wlPorts,
			Addresses: []*pbcatalog.WorkloadAddress{
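Re-enabling registerCatalogNodeV2 and the Workload's NodeName field restores the link that lets node-level health roll up into workload health. A partial sketch of that association, assuming Identity is a plain string field on pbcatalog.Workload and omitting the ports and addresses shown above:

package example

import (
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
)

// workloadOnNode ties a v2 Workload to its Node by name so the node's
// NodeHealthStatus resources factor into the workload's overall health.
func workloadOnNode(nodeName, identity string) *pbcatalog.Workload {
	return &pbcatalog.Workload{
		NodeName: nodeName, // matches the Node resource registered for the pod
		Identity: identity,
	}
}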