Implement the service endpoints controller (#17216)

* Add a ReplaceType dep mapper and move them into their own file

* Implement the service endpoints controller

* Implement a Catalog Controllers Integration Test
Matt Keeler 2023-06-06 17:09:48 -04:00 committed by GitHub
parent 2dd5551003
commit a5ba889034
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
92 changed files with 6550 additions and 162 deletions
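Note: the ReplaceType dependency mapper from the first bullet is not visible in the excerpt below. As a rough, hypothetical sketch only (the package name, mapper signature, and helper name here are assumptions based on the internal controller framework, not the exact code from this PR), such a mapper turns an event for a watched resource into a reconcile request for the resource with the same name and tenancy but a different type, e.g. mapping a ServiceEndpoints write back to its owning Service:

package dependency

import (
	"context"

	"github.com/hashicorp/consul/internal/controller"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// ReplaceType returns a DependencyMapper that keeps the watched resource's name and
// tenancy but swaps in desiredType, so e.g. a ServiceEndpoints change can be mapped
// back to a request to reconcile the Service of the same name. (Hypothetical sketch.)
func ReplaceType(desiredType *pbresource.Type) controller.DependencyMapper {
	return func(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
		return []controller.Request{{
			ID: &pbresource.ID{
				Type:    desiredType,
				Tenancy: res.Id.Tenancy,
				Name:    res.Id.Name,
			},
		}}, nil
	}
}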

go.mod

@@ -103,6 +103,7 @@ require (
	go.opentelemetry.io/proto/otlp v0.19.0
	go.uber.org/goleak v1.1.10
	golang.org/x/crypto v0.1.0
+	golang.org/x/exp v0.0.0-20230321023759-10a507213a29
	golang.org/x/net v0.10.0
	golang.org/x/oauth2 v0.6.0
	golang.org/x/sync v0.2.0
@@ -240,7 +241,6 @@ require (
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/otel/trace v1.16.0 // indirect
	go.uber.org/atomic v1.9.0 // indirect
-	golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
	golang.org/x/mod v0.10.0 // indirect
	golang.org/x/term v0.8.0 // indirect

@@ -0,0 +1,37 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Service"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Service",
"workloads": {
"prefixes": [
"api-"
]
},
"ports": [
{
"target_port": "http",
"protocol": "PROTOCOL_HTTP"
},
{
"target_port": "grpc",
"protocol": "PROTOCOL_GRPC"
},
{
"target_port": "mesh",
"protocol": "PROTOCOL_MESH"
}
]
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-1-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-1"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_PASSING"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-1"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.1"
},
{
"host": "198.18.2.1",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-1"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-10-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-10"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_WARNING"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-10"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.10"
},
{
"host": "198.18.2.10",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-3"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-11-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-11"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_CRITICAL"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-11"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.11"
},
{
"host": "198.18.2.11",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-3"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-12-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-12"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_MAINTENANCE"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-12"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.12"
},
{
"host": "198.18.2.12",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-3"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-13-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-13"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_PASSING"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-13"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.13"
},
{
"host": "198.18.2.13",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-4"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-14-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-14"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_WARNING"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-14"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.14"
},
{
"host": "198.18.2.14",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-4"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-15-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-15"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_CRITICAL"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-15"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.15"
},
{
"host": "198.18.2.15",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-4"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-16-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-16"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_MAINTENANCE"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-16"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.16"
},
{
"host": "198.18.2.16",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-4"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-17-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-17"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_PASSING"
}
}

@@ -0,0 +1,45 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-17"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.17"
},
{
"host": "198.18.2.17",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-18-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-18"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_WARNING"
}
}

@@ -0,0 +1,45 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-18"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.18"
},
{
"host": "198.18.2.18",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-19-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-19"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_CRITICAL"
}
}

@@ -0,0 +1,45 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-19"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.19"
},
{
"host": "198.18.2.19",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-2-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-2"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_WARNING"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-2"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.2"
},
{
"host": "198.18.2.2",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-1"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-20-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-20"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_MAINTENANCE"
}
}

@@ -0,0 +1,45 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-20"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.20"
},
{
"host": "198.18.2.20",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-3-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-3"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_CRITICAL"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-3"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.3"
},
{
"host": "198.18.2.3",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-1"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-4-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-4"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_MAINTENANCE"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-4"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.4"
},
{
"host": "198.18.2.4",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-1"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-5-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-5"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_PASSING"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-5"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.5"
},
{
"host": "198.18.2.5",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-2"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-6-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-6"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_WARNING"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-6"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.6"
},
{
"host": "198.18.2.6",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-2"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-7-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-7"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_CRITICAL"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-7"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.7"
},
{
"host": "198.18.2.7",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-2"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-8-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-8"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_MAINTENANCE"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-8"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.8"
},
{
"host": "198.18.2.8",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-2"
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-9-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-9"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_PASSING"
}
}

@@ -0,0 +1,46 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Workload"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "api-9"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Workload",
"addresses": [
{
"host": "172.16.1.9"
},
{
"host": "198.18.2.9",
"external": true,
"ports": [
"mesh"
]
}
],
"ports": {
"http": {
"port": 8080,
"protocol": "PROTOCOL_HTTP"
},
"grpc": {
"port": 9090,
"protocol": "PROTOCOL_GRPC"
},
"mesh": {
"port": 10000,
"protocol": "PROTOCOL_MESH"
}
},
"identity": "api",
"node_name": "node-3"
}
}

@@ -0,0 +1,47 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "ServiceEndpoints"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "foo"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Service"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "foo"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.ServiceEndpoints",
"endpoints": [
{
"addresses": [
{
"host": "198.18.0.1"
}
],
"ports": {
"external-service-port": {
"port": 9876,
"protocol": "PROTOCOL_HTTP2"
}
},
"health_status": "HEALTH_PASSING"
}
]
}
}

@@ -0,0 +1,24 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Service"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "foo"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Service",
"ports": [
{
"target_port": "external-service-port",
"protocol": "PROTOCOL_HTTP2"
}
]
}
}

@@ -0,0 +1,42 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Service"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "grpc-api"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Service",
"workloads": {
"names": [
"api-1",
"api-2",
"api-3",
"api-4",
"api-5",
"api-6",
"api-7",
"api-8",
"api-9",
"api-20"
]
},
"ports": [
{
"target_port": "grpc",
"protocol": "PROTOCOL_GRPC"
},
{
"target_port": "mesh",
"protocol": "PROTOCOL_MESH"
}
]
}
}

@@ -0,0 +1,29 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Service"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "http-api"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Service",
"workloads": {
"prefixes": [
"api-1"
]
},
"ports": [
{
"target_port": "http",
"protocol": "PROTOCOL_HTTP"
}
]
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-1-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Node"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-1"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_PASSING"
}
}

@@ -0,0 +1,27 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Node"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-1"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Node",
"addresses": [
{
"host": "198.18.1.1",
"external": true
},
{
"host": "172.16.0.1"
}
]
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-2-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Node"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-2"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_WARNING"
}
}

@@ -0,0 +1,27 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Node"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-2"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Node",
"addresses": [
{
"host": "198.18.1.2",
"external": true
},
{
"host": "172.16.0.2"
}
]
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-3-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Node"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-3"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_CRITICAL"
}
}

@@ -0,0 +1,27 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Node"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-3"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Node",
"addresses": [
{
"host": "198.18.1.3",
"external": true
},
{
"host": "172.16.0.3"
}
]
}
}

@@ -0,0 +1,33 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "HealthStatus"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-4-health"
},
"owner": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Node"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-4"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.HealthStatus",
"type": "synthetic",
"status": "HEALTH_MAINTENANCE"
}
}

@@ -0,0 +1,27 @@
{
"id": {
"type": {
"group": "catalog",
"group_version": "v1alpha1",
"kind": "Node"
},
"tenancy": {
"partition": "default",
"namespace": "default",
"peer_name": "local"
},
"name": "node-4"
},
"data": {
"@type": "hashicorp.consul.catalog.v1alpha1.Node",
"addresses": [
{
"host": "198.18.1.4",
"external": true
},
{
"host": "172.16.0.4"
}
]
}
}

@@ -0,0 +1,39 @@
package catalogtest
import (
"testing"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/catalog/internal/controllers"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource/reaper"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
)
func runInMemResourceServiceAndControllers(t *testing.T, deps controllers.Dependencies) pbresource.ResourceServiceClient {
t.Helper()
ctx := testutil.TestContext(t)
// Create the in-mem resource service
client := svctest.RunResourceService(t, catalog.RegisterTypes)
// Setup/Run the controller manager
mgr := controller.NewManager(client, testutil.Logger(t))
catalog.RegisterControllers(mgr, deps)
// We also depend on the reaper to take care of cleaning up owned health statuses and
// service endpoints so we must enable that controller as well
reaper.RegisterControllers(mgr)
mgr.SetRaftLeader(true)
go mgr.Run(ctx)
return client
}
func TestControllers_Integration(t *testing.T) {
client := runInMemResourceServiceAndControllers(t, catalog.DefaultControllerDependencies())
RunCatalogV1Alpha1IntegrationTest(t, client)
}

@@ -0,0 +1,707 @@
package catalogtest
import (
"embed"
"fmt"
"testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/endpoints"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/nodehealth"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth"
"github.com/hashicorp/consul/internal/catalog/internal/types"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/stretchr/testify/require"
)
var (
//go:embed integration_test_data
testData embed.FS
)
// RunCatalogV1Alpha1IntegrationTest will push up a bunch of catalog related data and then
// verify that all the expected reconciliations happened correctly. This test is
// intended to exercise a large swathe of behavior of the overall catalog package.
// Besides just controller reconciliation behavior, the intent is also to verify
// that integrations with the resource service are also working (i.e. the various
// validation, mutation and ACL hooks get invoked and are working properly)
//
// This test specifically is not doing any sort of lifecycle related tests to ensure
// that modification to values results in re-reconciliation as expected. Instead there
// is another RunCatalogIntegrationTestLifeCycle function that can be used for those
// purposes. The two are distinct so that the data being published and the assertions
// made against the system can be reused in upgrade tests.
func RunCatalogV1Alpha1IntegrationTest(t *testing.T, client pbresource.ResourceServiceClient) {
t.Helper()
PublishCatalogV1Alpha1IntegrationTestData(t, client)
VerifyCatalogV1Alpha1IntegrationTestResults(t, client)
}
// PublishCatalogV1Alpha1IntegrationTestData will perform a whole bunch of resource writes
// for Service, ServiceEndpoints, Workload, Node and HealthStatus objects
func PublishCatalogV1Alpha1IntegrationTestData(t *testing.T, client pbresource.ResourceServiceClient) {
t.Helper()
c := rtest.NewClient(client)
resources := rtest.ParseResourcesFromFilesystem(t, testData, "integration_test_data/v1alpha1")
c.PublishResources(t, resources)
}
func VerifyCatalogV1Alpha1IntegrationTestResults(t *testing.T, client pbresource.ResourceServiceClient) {
t.Helper()
c := rtest.NewClient(client)
testutil.RunStep(t, "resources-exist", func(t *testing.T) {
c.RequireResourceExists(t, rtest.Resource(catalog.ServiceV1Alpha1Type, "api").ID())
c.RequireResourceExists(t, rtest.Resource(catalog.ServiceV1Alpha1Type, "http-api").ID())
c.RequireResourceExists(t, rtest.Resource(catalog.ServiceV1Alpha1Type, "grpc-api").ID())
c.RequireResourceExists(t, rtest.Resource(catalog.ServiceV1Alpha1Type, "foo").ID())
for i := 1; i < 5; i++ {
nodeId := rtest.Resource(catalog.NodeV1Alpha1Type, fmt.Sprintf("node-%d", i)).ID()
c.RequireResourceExists(t, nodeId)
res := c.RequireResourceExists(t, rtest.Resource(catalog.HealthStatusV1Alpha1Type, fmt.Sprintf("node-%d-health", i)).ID())
rtest.RequireOwner(t, res, nodeId, true)
}
for i := 1; i < 21; i++ {
workloadId := rtest.Resource(catalog.WorkloadV1Alpha1Type, fmt.Sprintf("api-%d", i)).ID()
c.RequireResourceExists(t, workloadId)
res := c.RequireResourceExists(t, rtest.Resource(catalog.HealthStatusV1Alpha1Type, fmt.Sprintf("api-%d-health", i)).ID())
rtest.RequireOwner(t, res, workloadId, true)
}
})
testutil.RunStep(t, "node-health-reconciliation", func(t *testing.T) {
c.WaitForStatusCondition(t, rtest.Resource(catalog.NodeV1Alpha1Type, "node-1").ID(), nodehealth.StatusKey, nodehealth.ConditionPassing)
c.WaitForStatusCondition(t, rtest.Resource(catalog.NodeV1Alpha1Type, "node-2").ID(), nodehealth.StatusKey, nodehealth.ConditionWarning)
c.WaitForStatusCondition(t, rtest.Resource(catalog.NodeV1Alpha1Type, "node-3").ID(), nodehealth.StatusKey, nodehealth.ConditionCritical)
c.WaitForStatusCondition(t, rtest.Resource(catalog.NodeV1Alpha1Type, "node-4").ID(), nodehealth.StatusKey, nodehealth.ConditionMaintenance)
})
testutil.RunStep(t, "workload-health-reconciliation", func(t *testing.T) {
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-1").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadPassing)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-2").ID(), workloadhealth.StatusKey, workloadhealth.ConditionWorkloadWarning)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-3").ID(), workloadhealth.StatusKey, workloadhealth.ConditionWorkloadCritical)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-4").ID(), workloadhealth.StatusKey, workloadhealth.ConditionWorkloadMaintenance)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-5").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeWarning)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-6").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadWarning)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-7").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadCritical)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-8").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadMaintenance)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-9").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeCritical)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-10").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadCritical)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-11").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadCritical)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-12").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadMaintenance)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-13").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeMaintenance)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-14").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadMaintenance)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-15").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadMaintenance)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-16").ID(), workloadhealth.StatusKey, workloadhealth.ConditionNodeAndWorkloadMaintenance)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-17").ID(), workloadhealth.StatusKey, workloadhealth.ConditionWorkloadPassing)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-18").ID(), workloadhealth.StatusKey, workloadhealth.ConditionWorkloadWarning)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-19").ID(), workloadhealth.StatusKey, workloadhealth.ConditionWorkloadCritical)
c.WaitForStatusCondition(t, rtest.Resource(catalog.WorkloadV1Alpha1Type, "api-20").ID(), workloadhealth.StatusKey, workloadhealth.ConditionWorkloadMaintenance)
})
testutil.RunStep(t, "service-reconciliation", func(t *testing.T) {
c.WaitForStatusCondition(t, rtest.Resource(catalog.ServiceV1Alpha1Type, "foo").ID(), endpoints.StatusKey, endpoints.ConditionUnmanaged)
c.WaitForStatusCondition(t, rtest.Resource(catalog.ServiceV1Alpha1Type, "api").ID(), endpoints.StatusKey, endpoints.ConditionManaged)
c.WaitForStatusCondition(t, rtest.Resource(catalog.ServiceV1Alpha1Type, "http-api").ID(), endpoints.StatusKey, endpoints.ConditionManaged)
c.WaitForStatusCondition(t, rtest.Resource(catalog.ServiceV1Alpha1Type, "grpc-api").ID(), endpoints.StatusKey, endpoints.ConditionManaged)
})
testutil.RunStep(t, "service-endpoints-generation", func(t *testing.T) {
verifyServiceEndpoints(t, c, rtest.Resource(catalog.ServiceEndpointsV1Alpha1Type, "foo").ID(), expectedFooServiceEndpoints())
verifyServiceEndpoints(t, c, rtest.Resource(catalog.ServiceEndpointsV1Alpha1Type, "api").ID(), expectedApiServiceEndpoints(t, c))
verifyServiceEndpoints(t, c, rtest.Resource(catalog.ServiceEndpointsV1Alpha1Type, "http-api").ID(), expectedHTTPApiServiceEndpoints(t, c))
verifyServiceEndpoints(t, c, rtest.Resource(catalog.ServiceEndpointsV1Alpha1Type, "grpc-api").ID(), expectedGRPCApiServiceEndpoints(t, c))
})
}
func expectedFooServiceEndpoints() *pbcatalog.ServiceEndpoints {
return &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "198.18.0.1"},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"external-service-port": {
Port: 9876,
Protocol: pbcatalog.Protocol_PROTOCOL_HTTP2,
},
},
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
},
},
}
}
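// Note: the expected health for each endpoint below combines the workload's own
// HealthStatus with its node's HealthStatus, with the less healthy of the two winning
// (e.g. api-5 is passing itself but lands on the warning node-2). Workloads api-17
// through api-20 have no node_name, so only their own HealthStatus applies.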
func expectedApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.ServiceEndpoints {
return &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
// api-1
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-1").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.1", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.1", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
},
// api-2
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-2").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.2", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.2", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
},
// api-3
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-3").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.3", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.3", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-4
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-4").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.4", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.4", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-5
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-5").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.5", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.5", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
},
// api-6
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-6").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.6", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.6", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
},
// api-7
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-7").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.7", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.7", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-8
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-8").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.8", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.8", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-9
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-9").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.9", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.9", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-10
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-10").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.10", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.10", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-11
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-11").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.11", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.11", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-12
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-12").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.12", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.12", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-13
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-13").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.13", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.13", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-14
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-14").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.14", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.14", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-15
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-15").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.15", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.15", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-16
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-16").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.16", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.16", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-17
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-17").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.17", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.17", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
},
// api-18
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-18").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.18", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.18", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
},
// api-19
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-19").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.19", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.19", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-20
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-20").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.20", Ports: []string{"grpc", "http", "mesh"}},
{Host: "198.18.2.20", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
},
}
}
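// The http-api service selects workloads by the name prefix "api-1", so only api-1 and
// api-10 through api-19 match. It also exposes only the "http" target port, so the
// external 198.18.2.x addresses (which serve only the "mesh" port) are filtered out of
// each endpoint's address list.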
func expectedHTTPApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.ServiceEndpoints {
return &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
// api-1
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-1").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.1", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
},
// api-10
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-10").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.10", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-11
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-11").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.11", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-12
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-12").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.12", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-13
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-13").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.13", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-14
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-14").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.14", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-15
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-15").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.15", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-16
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-16").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.16", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-17
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-17").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.17", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
},
// api-18
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-18").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.18", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
},
// api-19
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-19").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.19", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
},
}
}
func expectedGRPCApiServiceEndpoints(t *testing.T, c *rtest.Client) *pbcatalog.ServiceEndpoints {
return &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
// api-1
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-1").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.1", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.1", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
},
// api-2
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-2").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.2", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.2", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
},
// api-3
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-3").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.3", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.3", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-4
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-4").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.4", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.4", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-5
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-5").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.5", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.5", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
},
// api-6
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-6").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.6", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.6", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_WARNING,
},
// api-7
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-7").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.7", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.7", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-8
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-8").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.8", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.8", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
// api-9
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-9").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.9", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.9", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
},
// api-20
{
TargetRef: c.ResolveResourceID(t, rtest.Resource(types.WorkloadV1Alpha1Type, "api-20").ID()),
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "172.16.1.20", Ports: []string{"grpc", "mesh"}},
{Host: "198.18.2.20", External: true, Ports: []string{"mesh"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
"mesh": {Port: 10000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
HealthStatus: pbcatalog.Health_HEALTH_MAINTENANCE,
},
},
}
}
func verifyServiceEndpoints(t *testing.T, c *rtest.Client, id *pbresource.ID, expected *pbcatalog.ServiceEndpoints) {
c.WaitForResourceState(t, id, func(t rtest.T, res *pbresource.Resource) {
var actual pbcatalog.ServiceEndpoints
err := res.Data.UnmarshalTo(&actual)
require.NoError(t, err)
prototest.AssertElementsMatch(t, expected.Endpoints, actual.Endpoints)
})
}

View File

@ -6,6 +6,7 @@ package catalog
import (
"github.com/hashicorp/consul/internal/catalog/internal/controllers"
"github.com/hashicorp/consul/internal/catalog/internal/mappers/nodemapper"
"github.com/hashicorp/consul/internal/catalog/internal/mappers/selectiontracker"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
@ -52,6 +53,7 @@ type ControllerDependencies = controllers.Dependencies
func DefaultControllerDependencies() ControllerDependencies {
return ControllerDependencies{
WorkloadHealthNodeMapper: nodemapper.New(),
EndpointsWorkloadMapper: selectiontracker.New(),
}
}

View File

@ -0,0 +1,384 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package endpoints
import (
"context"
"sort"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
"github.com/hashicorp/consul/proto-public/pbresource"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
)
const (
endpointsMetaManagedBy = "managed-by-controller"
)
// The WorkloadMapper interface provides an implementation for mapping a watch event
// for a Workload resource into the reconciliation requests it should trigger.
type WorkloadMapper interface {
// MapWorkload conforms to the controller.DependencyMapper signature. Given a Workload
// resource it should report the resource IDs that have selected the workload.
MapWorkload(context.Context, controller.Runtime, *pbresource.Resource) ([]controller.Request, error)
// TrackIDForSelector should be used to associate the specified WorkloadSelector with
// the given resource ID. Future calls to MapWorkload will then map workloads
// matching that selector back to this ID.
TrackIDForSelector(*pbresource.ID, *pbcatalog.WorkloadSelector)
// UntrackID should be used to inform the tracker to forget about the specified ID
UntrackID(*pbresource.ID)
}
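A rough usage sketch of this contract (illustrative only; mapper, ctx, rt and workloadRes are placeholder names, rtest is the test helper used elsewhere in this change, and the selectiontracker implementation wired up in DefaultControllerDependencies is assumed to satisfy the interface):
endpointsID := rtest.Resource(types.ServiceEndpointsType, "api").ID()
// Track the selector first so that workload events arriving afterwards are not missed.
mapper.TrackIDForSelector(endpointsID, &pbcatalog.WorkloadSelector{Prefixes: []string{"api-"}})
// A watch event for any workload named "api-*" now maps back to the endpoints ID.
reqs, err := mapper.MapWorkload(ctx, rt, workloadRes)
// reqs would contain controller.Request{ID: endpointsID}; err is nil in the happy path.
// Once the service stops selecting workloads, the association is dropped.
mapper.UntrackID(endpointsID)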
// ServiceEndpointsController creates a controller to perform automatic endpoint management for
// services.
func ServiceEndpointsController(workloadMap WorkloadMapper) controller.Controller {
if workloadMap == nil {
panic("No WorkloadMapper was provided to the ServiceEndpointsController constructor")
}
return controller.ForType(types.ServiceEndpointsType).
WithWatch(types.ServiceType, controller.ReplaceType(types.ServiceEndpointsType)).
WithWatch(types.WorkloadType, workloadMap.MapWorkload).
WithReconciler(newServiceEndpointsReconciler(workloadMap))
}
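For orientation, a minimal wiring sketch assuming the selectiontracker mapper and the controller manager used by the controller test in this change (client, logger and ctx are placeholders supplied by the caller):
mapper := selectiontracker.New()
mgr := controller.NewManager(client, logger)
mgr.Register(ServiceEndpointsController(mapper))
mgr.SetRaftLeader(true)
go mgr.Run(ctx)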
type serviceEndpointsReconciler struct {
workloadMap WorkloadMapper
}
func newServiceEndpointsReconciler(workloadMap WorkloadMapper) *serviceEndpointsReconciler {
return &serviceEndpointsReconciler{
workloadMap: workloadMap,
}
}
// Reconcile will reconcile one ServiceEndpoints resource in response to some event.
func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
// The runtime is passed by value so replacing it here for the remainder of this
// reconciliation request processing will not affect future invocations.
rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", StatusKey)
rt.Logger.Trace("reconciling service endpoints")
endpointsID := req.ID
serviceID := &pbresource.ID{
Type: types.ServiceType,
Tenancy: endpointsID.Tenancy,
Name: endpointsID.Name,
}
// First we read and unmarshal the service
serviceData, err := getServiceData(ctx, rt, serviceID)
if err != nil {
rt.Logger.Error("error retrieving corresponding Service", "error", err)
return err
}
// Check if the service exists. If it doesn't we can avoid a bunch of other work.
if serviceData == nil {
rt.Logger.Trace("service has been deleted")
// The service was deleted so we need to update the WorkloadMapper to tell it to
// stop tracking this service
r.workloadMap.UntrackID(req.ID)
// Note that because we configured ServiceEndpoints to be owned by the service,
// the service endpoints object should eventually be automatically deleted.
// There is no reason to attempt deletion here.
return nil
}
// Now read and unmarshal the endpoints. We don't need this data just yet but all
// code paths from this point on will need this regardless of branching so we pull
// it now.
endpointsData, err := getEndpointsData(ctx, rt, endpointsID)
if err != nil {
rt.Logger.Error("error retrieving existing endpoints", "error", err)
return err
}
var status *pbresource.Condition
if serviceUnderManagement(serviceData.service) {
rt.Logger.Trace("service is enabled for automatic endpoint management")
// This service should have its endpoints automatically managed
status = ConditionManaged
// Inform the WorkloadMapper to track this service and its selectors so that
// future workload updates that would be matched by the service's selectors
// cause this service to be re-reconciled.
r.workloadMap.TrackIDForSelector(req.ID, serviceData.service.GetWorkloads())
// Now read and unmarshal all workloads selected by the service. It is imperative
// that this happens after we notify the selection tracker to be tracking that
// selection criteria. If the order were reversed we could potentially miss
// workload creations that should be selected if they happen after gathering
// the workloads but before tracking the selector. Tracking first ensures that
// any event that happens after that would get mapped to an event for these
// endpoints.
workloadData, err := getWorkloadData(ctx, rt, serviceData)
if err != nil {
rt.Logger.Trace("error retrieving selected workloads", "error", err)
return err
}
// Calculate the latest endpoints from the already gathered workloads
latestEndpoints := workloadsToEndpoints(serviceData.service, workloadData)
// Before writing the endpoints actually check to see if they are changed
if endpointsData == nil || !proto.Equal(endpointsData.endpoints, latestEndpoints) {
rt.Logger.Trace("endpoints have changed")
// First encode the endpoints data as an Any type.
endpointData, err := anypb.New(latestEndpoints)
if err != nil {
rt.Logger.Error("error marshalling latest endpoints", "error", err)
return err
}
// Now perform the write. The endpoints resource should be owned by the service
// so that it will automatically be deleted upon service deletion. We are using
// a special metadata entry to track that this controller is responsible for
// the management of this resource.
_, err = rt.Client.Write(ctx, &pbresource.WriteRequest{
Resource: &pbresource.Resource{
Id: req.ID,
Owner: serviceData.resource.Id,
Metadata: map[string]string{
endpointsMetaManagedBy: StatusKey,
},
Data: endpointData,
},
})
if err != nil {
rt.Logger.Error("error writing generated endpoints", "error", err)
return err
} else {
rt.Logger.Trace("updated endpoints were successfully written")
}
}
} else {
rt.Logger.Trace("endpoints are not being automatically managed")
// This service is not having its endpoints automatically managed
status = ConditionUnmanaged
// Inform the WorkloadMapper that it no longer needs to track this service
// as it is no longer under endpoint management
r.workloadMap.UntrackID(req.ID)
// Delete the managed ServiceEndpoints if the metadata indicates that they
// were previously managed by this controller.
if endpointsData != nil && endpointsData.resource.Metadata[endpointsMetaManagedBy] == StatusKey {
rt.Logger.Trace("removing previous managed endpoints")
// This performs a CAS deletion to protect against the case where the user
// has overwritten the endpoints since we fetched them.
_, err := rt.Client.Delete(ctx, &pbresource.DeleteRequest{
Id: endpointsData.resource.Id,
Version: endpointsData.resource.Version,
})
// Potentially we could look for CAS failures by checking if the gRPC
// status code is Aborted. However, it's an edge case and there could
// possibly be other reasons why the gRPC status code would be Aborted
// besides CAS version mismatches. The simplest thing to do is to just
// propagate the error and retry reconciliation later.
if err != nil {
rt.Logger.Error("error deleting previously managed endpoints", "error", err)
return err
}
}
}
// Update the Service status if necessary. Mainly we want to inform the user
// whether we are automatically managing the endpoints to set expectations
// for that object existing or not.
newStatus := &pbresource.Status{
ObservedGeneration: serviceData.resource.Generation,
Conditions: []*pbresource.Condition{
status,
},
}
// If the status is unchanged then we should return and avoid the unnecessary write
if resource.EqualStatus(serviceData.resource.Status[StatusKey], newStatus, false) {
return nil
}
_, err = rt.Client.WriteStatus(ctx, &pbresource.WriteStatusRequest{
Id: serviceData.resource.Id,
Key: StatusKey,
Status: newStatus,
})
if err != nil {
rt.Logger.Error("error updating the service's status", "error", err, "service", serviceID)
}
return err
}
// determineWorkloadHealth will find the workload-health controller's status
// within the resource status and parse the workload's health out of it. If
// the workload-health controller has yet to reconcile the workload health
// or the status isn't in the expected form then this function will return
// HEALTH_CRITICAL.
func determineWorkloadHealth(workload *pbresource.Resource) pbcatalog.Health {
status, found := workload.Status[workloadhealth.StatusKey]
if !found {
return pbcatalog.Health_HEALTH_CRITICAL
}
for _, condition := range status.Conditions {
if condition.Type == workloadhealth.StatusConditionHealthy {
raw, found := pbcatalog.Health_value[condition.Reason]
if found {
return pbcatalog.Health(raw)
}
return pbcatalog.Health_HEALTH_CRITICAL
}
}
return pbcatalog.Health_HEALTH_CRITICAL
}
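As an illustration, the status shape this function parses looks like the following (a sketch mirroring the unit tests in this change; the workload resource itself is hypothetical):
res := rtest.Resource(types.WorkloadType, "example").
	WithStatus(workloadhealth.StatusKey, &pbresource.Status{
		Conditions: []*pbresource.Condition{
			{
				Type:   workloadhealth.StatusConditionHealthy,
				State:  pbresource.Condition_STATE_TRUE,
				Reason: pbcatalog.Health_HEALTH_PASSING.String(),
			},
		},
	}).
	Build()
// determineWorkloadHealth(res) reports HEALTH_PASSING; a missing status, missing
// condition, or unrecognized reason all fall back to HEALTH_CRITICAL.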
// serviceUnderManagement detects whether this service should have its
// endpoints automatically managed by the controller
func serviceUnderManagement(svc *pbcatalog.Service) bool {
sel := svc.GetWorkloads()
if sel == nil {
// The selector wasn't present at all. Therefore this service is not under
// automatic endpoint management.
return false
}
if len(sel.Names) < 1 && len(sel.Prefixes) < 1 {
// The selector was set in the request but the lists of workload names
// and prefixes were both empty. Therefore this service is not under
// automatic endpoint management
return false
}
// Some workload selection criteria exists, so this service is considered
// under automatic endpoint management.
return true
}
// workloadsToEndpoints will translate the Workload resources into a ServiceEndpoints resource
func workloadsToEndpoints(svc *pbcatalog.Service, workloads []*workloadData) *pbcatalog.ServiceEndpoints {
var endpoints []*pbcatalog.Endpoint
for _, workload := range workloads {
endpoint := workloadToEndpoint(svc, workload)
if endpoint != nil {
endpoints = append(endpoints, endpoint)
}
}
return &pbcatalog.ServiceEndpoints{
Endpoints: endpoints,
}
}
// workloadToEndpoint will convert a workload resource into a singular Endpoint to be
// put within a ServiceEndpoints resource.
//
// The conversion process involves parsing the workload's health and filtering its
// addresses and ports down to just what the service wants to consume.
//
// Determining the workload's health requires the workload-health controller to already
// have reconciled the workload's health and stored it within the resource's Status field.
// Any unreconciled workload health will be represented in the ServiceEndpoints with
// the HEALTH_CRITICAL status.
func workloadToEndpoint(svc *pbcatalog.Service, data *workloadData) *pbcatalog.Endpoint {
health := determineWorkloadHealth(data.resource)
endpointPorts := make(map[string]*pbcatalog.WorkloadPort)
// Create the endpoint's filtered ports map. Only workload ports specified in
// one of the service's ports are included. Ports with a protocol mismatch
// between the service and workload will be excluded as well.
for _, svcPort := range svc.Ports {
workloadPort, found := data.workload.Ports[svcPort.TargetPort]
if !found {
// this workload doesn't have this port so ignore it
continue
}
if workloadPort.Protocol != svcPort.Protocol {
// workload port mismatch - ignore it
continue
}
endpointPorts[svcPort.TargetPort] = workloadPort
}
var workloadAddrs []*pbcatalog.WorkloadAddress
// Now we filter down the addresses and their corresponding port usage to just
// what the service needs to consume. If the address isn't being used to serve
// any of the service's target ports, it will be entirely excluded from the
// address list. If some but not all of its ports are served, then the list
// of ports will be reduced to just the intersection of the service ports
// and the workload address's ports.
for _, addr := range data.workload.Addresses {
var ports []string
if len(addr.Ports) > 0 {
// The workload address has defined ports, filter these as necessary
for _, portName := range addr.Ports {
// check if the workload port has been selected by the service
_, found := endpointPorts[portName]
if !found {
// this port isn't selected by the service so drop this port
continue
}
ports = append(ports, portName)
}
} else {
// The workload address doesn't specify ports. This lack of port specification
// means that all ports are exposed on the interface so here we create a list
// of all the port names exposed by the service.
for portName := range endpointPorts {
ports = append(ports, portName)
}
}
// sort the ports to keep them stable and prevent unnecessary rewrites when the endpoints
// get diffed
sort.Slice(ports, func(i, j int) bool {
return ports[i] < ports[j]
})
// Only record this workload address if one or more of its ports were consumed
// by the service.
if len(ports) > 0 {
workloadAddrs = append(workloadAddrs, &pbcatalog.WorkloadAddress{
Host: addr.Host,
External: addr.External,
Ports: ports,
})
}
}
// If all the workload addresses were filtered out then we should completely ignore
// the workload. While its name matched the selector, nothing else did, so it isn't
// usable as far as the service is concerned.
if len(workloadAddrs) < 1 {
return nil
}
return &pbcatalog.Endpoint{
TargetRef: data.resource.Id,
HealthStatus: health,
Addresses: workloadAddrs,
Ports: endpointPorts,
}
}
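A compact sketch of the filtering behavior described above, with values borrowed from the unit tests in this change (wrapping the workload in a workloadData with its backing resource is omitted for brevity):
svc := &pbcatalog.Service{
	Ports: []*pbcatalog.ServicePort{
		{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
	},
}
wl := &pbcatalog.Workload{
	Addresses: []*pbcatalog.WorkloadAddress{
		{Host: "127.0.0.1"},                          // no ports listed: inherits every selected port
		{Host: "198.8.0.1", Ports: []string{"grpc"}}, // serves only an unselected port: dropped entirely
	},
	Ports: map[string]*pbcatalog.WorkloadPort{
		"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
		"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
	},
}
// The resulting Endpoint keeps only the "http" port and the 127.0.0.1 address.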

View File

@ -0,0 +1,709 @@
package endpoints
import (
"context"
"testing"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth"
"github.com/hashicorp/consul/internal/catalog/internal/mappers/selectiontracker"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
var (
badId = rtest.Resource(&pbresource.Type{Group: "not", Kind: "found", GroupVersion: "vfake"}, "foo").ID()
)
func TestWorkloadsToEndpoints(t *testing.T) {
// This test's purpose is to ensure that converting multiple workloads to endpoints
// happens as expected. It is not concerned with the data in each endpoint but rather
// the removal of unconvertible workloads (nil endpoints returned by workloadToEndpoint).
// The workload to endpoint conversion only cares about the service ports
service := &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{TargetPort: "http2", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP2},
},
}
workloadAddresses := []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1"},
}
// This workload is port-matched with the service and should show up as an
// endpoint in the final set.
workloadData1 := &pbcatalog.Workload{
Addresses: workloadAddresses,
Ports: map[string]*pbcatalog.WorkloadPort{
"http2": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP2},
},
}
// This workload is NOT port-matched with the service and should be omitted.
workloadData2 := &pbcatalog.Workload{
Addresses: workloadAddresses,
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}
// Build out the workloads.
workloads := []*workloadData{
{
// this workload should result in an endpoint
resource: rtest.Resource(types.WorkloadType, "foo").
WithData(t, workloadData1).
Build(),
workload: workloadData1,
},
{
// this workload should be filtered out
resource: rtest.Resource(types.WorkloadType, "bar").
WithData(t, workloadData2).
Build(),
workload: workloadData2,
},
}
endpoints := workloadsToEndpoints(service, workloads)
require.Len(t, endpoints.Endpoints, 1)
prototest.AssertDeepEqual(t, workloads[0].resource.Id, endpoints.Endpoints[0].TargetRef)
}
func TestWorkloadToEndpoint(t *testing.T) {
// This test handles ensuring that the bulk of the functionality of
// the workloadToEndpoint function works correctly.
//
// * WorkloadPorts that are not selected by one service port are ignored
// and not present in the resulting Endpoint
// * WorkloadPorts that have a protocol mismatch with the service port
// are ignored and not present in the resulting Endpoint
// * WorkloadAddresses with 0 non-ignored ports are omitted from the
// resulting Endpoint.
// * Specifying no ports for a WorkloadAddress will use all the non-ignored
// ports. These are explicitly set but that is intended to be an
// implementation detail at this point.
service := &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
// the workload will not have this port so it should be ignored
{TargetPort: "not-found", Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
// the workload will have a different protocol for this port and so it
// will be ignored.
{TargetPort: "grpc", Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
},
}
workload := &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{
// this address will be in the endpoint with all the ports that are
// not filtered out - so just http
{Host: "127.0.0.1"},
// this address will be in the endpoint but with a filtered ports list
{Host: "198.18.1.1", Ports: []string{"http", "grpc"}},
// this address should not show up in the endpoint because the port it
// uses is filtered out
{Host: "198.8.0.1", Ports: []string{"grpc"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
// the protocol is wrong here so it will not show up in the endpoints.
"grpc": {Port: 9090, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP2},
},
}
data := &workloadData{
resource: rtest.Resource(types.WorkloadType, "foo").
WithData(t, workload).
Build(),
workload: workload,
}
expected := &pbcatalog.Endpoint{
TargetRef: data.resource.Id,
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1", Ports: []string{"http"}},
{Host: "198.18.1.1", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": workload.Ports["http"],
},
// The health is critical because we are not setting the workload's
// health status. The tests for determineWorkloadHealth will ensure
// that we can properly determine the health status and the overall
// controller tests will prove that the integration works as expected.
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
}
prototest.AssertDeepEqual(t, expected, workloadToEndpoint(service, data))
}
func TestWorkloadToEndpoint_AllAddressesFiltered(t *testing.T) {
// This test checks the specific case where the workload has no
// address/port combinations that remain unfiltered. In this
// case we want to ensure nil is returned instead of an Endpoint
// with no addresses.
service := &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{TargetPort: "not-found", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}
workload := &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1"},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}
data := &workloadData{
resource: rtest.Resource(types.WorkloadType, "foo").
WithData(t, workload).
Build(),
workload: workload,
}
require.Nil(t, workloadToEndpoint(service, data))
}
func TestServiceUnderManagement(t *testing.T) {
// This test ensures that we can properly detect when a service
// should have endpoints generated for it vs when those endpoints
// are not being automatically managed.
type testCase struct {
svc *pbcatalog.Service
managed bool
}
cases := map[string]testCase{
"nil": {
svc: nil,
managed: false,
},
"nil-selector": {
svc: &pbcatalog.Service{Workloads: nil},
managed: false,
},
"empty-selector": {
svc: &pbcatalog.Service{Workloads: &pbcatalog.WorkloadSelector{}},
managed: false,
},
"exact-match": {
svc: &pbcatalog.Service{Workloads: &pbcatalog.WorkloadSelector{
Names: []string{"foo"},
}},
managed: true,
},
"prefix-match": {
svc: &pbcatalog.Service{Workloads: &pbcatalog.WorkloadSelector{
Prefixes: []string{"foo"},
}},
managed: true,
},
"multiple": {
svc: &pbcatalog.Service{Workloads: &pbcatalog.WorkloadSelector{
Names: []string{"foo"},
Prefixes: []string{"api-"},
}},
managed: true,
},
}
for name, tcase := range cases {
t.Run(name, func(t *testing.T) {
require.Equal(t, tcase.managed, serviceUnderManagement(tcase.svc))
})
}
}
func TestDetermineWorkloadHealth(t *testing.T) {
// This test ensures that parsing workload health out of the
// resource status works as expected.
type testCase struct {
res *pbresource.Resource
expected pbcatalog.Health
}
cases := map[string]testCase{
"no-status": {
res: rtest.Resource(types.WorkloadType, "foo").Build(),
expected: pbcatalog.Health_HEALTH_CRITICAL,
},
"condition-not-found": {
res: rtest.Resource(types.WorkloadType, "foo").
WithStatus(workloadhealth.StatusKey, &pbresource.Status{
Conditions: []*pbresource.Condition{
{
Type: "other",
State: pbresource.Condition_STATE_TRUE,
Reason: "NOT_RELEVANT",
},
},
}).
Build(),
expected: pbcatalog.Health_HEALTH_CRITICAL,
},
"invalid-reason": {
res: rtest.Resource(types.WorkloadType, "foo").
WithStatus(workloadhealth.StatusKey, &pbresource.Status{
Conditions: []*pbresource.Condition{
{
Type: workloadhealth.StatusConditionHealthy,
State: pbresource.Condition_STATE_TRUE,
Reason: "INVALID_HEALTH_STATUS_REASON",
},
},
}).
Build(),
expected: pbcatalog.Health_HEALTH_CRITICAL,
},
"passing": {
res: rtest.Resource(types.WorkloadType, "foo").
WithStatus(workloadhealth.StatusKey, &pbresource.Status{
Conditions: []*pbresource.Condition{
{
Type: workloadhealth.StatusConditionHealthy,
State: pbresource.Condition_STATE_TRUE,
Reason: pbcatalog.Health_HEALTH_PASSING.String(),
},
},
}).
Build(),
expected: pbcatalog.Health_HEALTH_PASSING,
},
"warning": {
res: rtest.Resource(types.WorkloadType, "foo").
WithStatus(workloadhealth.StatusKey, &pbresource.Status{
Conditions: []*pbresource.Condition{
{
Type: workloadhealth.StatusConditionHealthy,
State: pbresource.Condition_STATE_TRUE,
Reason: pbcatalog.Health_HEALTH_WARNING.String(),
},
},
}).
Build(),
expected: pbcatalog.Health_HEALTH_WARNING,
},
"critical": {
res: rtest.Resource(types.WorkloadType, "foo").
WithStatus(workloadhealth.StatusKey, &pbresource.Status{
Conditions: []*pbresource.Condition{
{
Type: workloadhealth.StatusConditionHealthy,
State: pbresource.Condition_STATE_TRUE,
Reason: pbcatalog.Health_HEALTH_CRITICAL.String(),
},
},
}).
Build(),
expected: pbcatalog.Health_HEALTH_CRITICAL,
},
"maintenance": {
res: rtest.Resource(types.WorkloadType, "foo").
WithStatus(workloadhealth.StatusKey, &pbresource.Status{
Conditions: []*pbresource.Condition{
{
Type: workloadhealth.StatusConditionHealthy,
State: pbresource.Condition_STATE_TRUE,
Reason: pbcatalog.Health_HEALTH_MAINTENANCE.String(),
},
},
}).
Build(),
expected: pbcatalog.Health_HEALTH_MAINTENANCE,
},
}
for name, tcase := range cases {
t.Run(name, func(t *testing.T) {
require.Equal(t, tcase.expected, determineWorkloadHealth(tcase.res))
})
}
}
type controllerSuite struct {
suite.Suite
ctx context.Context
client *rtest.Client
rt controller.Runtime
tracker *selectiontracker.WorkloadSelectionTracker
reconciler *serviceEndpointsReconciler
}
func (suite *controllerSuite) SetupTest() {
suite.ctx = testutil.TestContext(suite.T())
client := svctest.RunResourceService(suite.T(), types.Register)
suite.rt = controller.Runtime{
Client: client,
Logger: testutil.Logger(suite.T()),
}
suite.client = rtest.NewClient(client)
suite.tracker = selectiontracker.New()
suite.reconciler = newServiceEndpointsReconciler(suite.tracker)
}
func (suite *controllerSuite) requireTracking(workload *pbresource.Resource, ids ...*pbresource.ID) {
reqs, err := suite.tracker.MapWorkload(suite.ctx, suite.rt, workload)
require.NoError(suite.T(), err)
require.Len(suite.T(), reqs, len(ids))
for _, id := range ids {
prototest.AssertContainsElement(suite.T(), reqs, controller.Request{ID: id})
}
}
func (suite *controllerSuite) requireEndpoints(resource *pbresource.Resource, expected ...*pbcatalog.Endpoint) {
var svcEndpoints pbcatalog.ServiceEndpoints
require.NoError(suite.T(), resource.Data.UnmarshalTo(&svcEndpoints))
require.Len(suite.T(), svcEndpoints.Endpoints, len(expected))
prototest.AssertElementsMatch(suite.T(), expected, svcEndpoints.Endpoints)
}
func (suite *controllerSuite) TestReconcile_ServiceNotFound() {
// This test's purpose is to ensure that when we are reconciling
// endpoints for a service that no longer exists, we stop
// tracking the endpoints resource ID in the selection tracker.
// generate a workload resource to use for checking if it maps
// to a service endpoints object
workload := rtest.Resource(types.WorkloadType, "foo").Build()
// ensure that the tracker knows about the service prior to
// calling reconcile so that we can ensure it removes tracking
id := rtest.Resource(types.ServiceEndpointsType, "not-found").ID()
suite.tracker.TrackIDForSelector(id, &pbcatalog.WorkloadSelector{Prefixes: []string{""}})
// verify that mapping the workload to service endpoints returns a
// non-empty list prior to reconciliation which should remove the
// tracking.
suite.requireTracking(workload, id)
// Because the service doesn't exist, this reconcile call should
// cause tracking of the endpoints to be removed
err := suite.reconciler.Reconcile(suite.ctx, suite.rt, controller.Request{ID: id})
require.NoError(suite.T(), err)
// Now ensure that the tracking was removed
suite.requireTracking(workload)
}
func (suite *controllerSuite) TestReconcile_NoSelector_NoEndpoints() {
// This test's purpose is to ensure that the service's status is
// updated to record that its endpoints are not being automatically
// managed. Additionally, with no endpoints pre-existing it will
// not attempt to delete them.
service := rtest.Resource(types.ServiceType, "test").
WithData(suite.T(), &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
endpointsID := rtest.Resource(types.ServiceEndpointsType, "test").ID()
err := suite.reconciler.Reconcile(suite.ctx, suite.rt, controller.Request{ID: endpointsID})
require.NoError(suite.T(), err)
suite.client.RequireStatusCondition(suite.T(), service.Id, StatusKey, ConditionUnmanaged)
}
func (suite *controllerSuite) TestReconcile_NoSelector_ManagedEndpoints() {
// This test's purpose is to ensure that when moving from managed endpoints
// to unmanaged endpoints for a service, any already generated managed endpoints
// get deleted.
service := rtest.Resource(types.ServiceType, "test").
WithData(suite.T(), &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
endpoints := rtest.Resource(types.ServiceEndpointsType, "test").
WithData(suite.T(), &pbcatalog.ServiceEndpoints{}).
// this marks these endpoints as under management
WithMeta(endpointsMetaManagedBy, StatusKey).
Write(suite.T(), suite.client)
err := suite.reconciler.Reconcile(suite.ctx, suite.rt, controller.Request{ID: endpoints.Id})
require.NoError(suite.T(), err)
// the status should indicate the service's endpoints are not being managed
suite.client.RequireStatusCondition(suite.T(), service.Id, StatusKey, ConditionUnmanaged)
// endpoints under management should be deleted
suite.client.RequireResourceNotFound(suite.T(), endpoints.Id)
}
func (suite *controllerSuite) TestReconcile_NoSelector_UnmanagedEndpoints() {
// This test's purpose is to ensure that when re-reconciling a service that
// doesn't have its endpoints managed, that we do not delete any unmanaged
// ServiceEndpoints resource that the user would have manually written.
service := rtest.Resource(types.ServiceType, "test").
WithData(suite.T(), &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
endpoints := rtest.Resource(types.ServiceEndpointsType, "test").
WithData(suite.T(), &pbcatalog.ServiceEndpoints{}).
Write(suite.T(), suite.client)
err := suite.reconciler.Reconcile(suite.ctx, suite.rt, controller.Request{ID: endpoints.Id})
require.NoError(suite.T(), err)
// the status should indicate the service's endpoints are not being managed
suite.client.RequireStatusCondition(suite.T(), service.Id, StatusKey, ConditionUnmanaged)
// unmanaged endpoints should not be deleted when the service is unmanaged
suite.client.RequireResourceExists(suite.T(), endpoints.Id)
}
func (suite *controllerSuite) TestReconcile_Managed_NoPreviousEndpoints() {
// This test's purpose is to ensure the managed endpoint generation occurs
// as expected when there are no pre-existing endpoints.
service := rtest.Resource(types.ServiceType, "test").
WithData(suite.T(), &pbcatalog.Service{
Workloads: &pbcatalog.WorkloadSelector{
Prefixes: []string{""},
},
Ports: []*pbcatalog.ServicePort{
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
endpointsID := rtest.Resource(types.ServiceEndpointsType, "test").ID()
rtest.Resource(types.WorkloadType, "test-workload").
WithData(suite.T(), &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
err := suite.reconciler.Reconcile(suite.ctx, suite.rt, controller.Request{ID: endpointsID})
require.NoError(suite.T(), err)
// Verify that the services status has been set to indicate endpoints are automatically managed.
suite.client.RequireStatusCondition(suite.T(), service.Id, StatusKey, ConditionManaged)
// The service endpoints metadata should include our tag to indicate it was generated by this controller
res := suite.client.RequireResourceMeta(suite.T(), endpointsID, endpointsMetaManagedBy, StatusKey)
var endpoints pbcatalog.ServiceEndpoints
err = res.Data.UnmarshalTo(&endpoints)
require.NoError(suite.T(), err)
require.Len(suite.T(), endpoints.Endpoints, 1)
// We are not going to retest the workloads-to-endpoints conversion process here.
// The length check should be sufficient to prove the endpoints are being
// converted. The unit tests for the workloadsToEndpoints functions prove that
// the process works correctly in all cases.
}
func (suite *controllerSuite) TestReconcile_Managed_ExistingEndpoints() {
// This test's purpose is to ensure that when the current set of endpoints
// differs from any prior set of endpoints that the resource gets rewritten.
service := rtest.Resource(types.ServiceType, "test").
WithData(suite.T(), &pbcatalog.Service{
Workloads: &pbcatalog.WorkloadSelector{
Prefixes: []string{""},
},
Ports: []*pbcatalog.ServicePort{
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
endpoints := rtest.Resource(types.ServiceEndpointsType, "test").
WithData(suite.T(), &pbcatalog.ServiceEndpoints{}).
WithOwner(service.Id).
Write(suite.T(), suite.client)
rtest.Resource(types.WorkloadType, "test-workload").
WithData(suite.T(), &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
err := suite.reconciler.Reconcile(suite.ctx, suite.rt, controller.Request{ID: endpoints.Id})
require.NoError(suite.T(), err)
suite.client.RequireStatusCondition(suite.T(), service.Id, StatusKey, ConditionManaged)
res := suite.client.RequireResourceMeta(suite.T(), endpoints.Id, endpointsMetaManagedBy, StatusKey)
var newEndpoints pbcatalog.ServiceEndpoints
err = res.Data.UnmarshalTo(&newEndpoints)
require.NoError(suite.T(), err)
require.Len(suite.T(), newEndpoints.Endpoints, 1)
}
func (suite *controllerSuite) TestController() {
// This test's purpose is to exercise the controller in a halfway realistic way.
// Generally we are trying to go through the whole lifecycle of creating services,
// adding workloads, modifying workload health and modifying the service selection
// criteria. This isn't a full integration test as that would require also
// executing the workload health controller. Instead workload health status is
// synthesized as necessary.
// Run the controller manager
mgr := controller.NewManager(suite.client, suite.rt.Logger)
mgr.Register(ServiceEndpointsController(suite.tracker))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
// Add a service - there are no workloads so an empty endpoints
// object should be created.
service := rtest.Resource(types.ServiceType, "api").
WithData(suite.T(), &pbcatalog.Service{
Workloads: &pbcatalog.WorkloadSelector{
Prefixes: []string{"api-"},
},
Ports: []*pbcatalog.ServicePort{
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
// Wait for the controller to record that the endpoints are being managed
res := suite.client.WaitForReconciliation(suite.T(), service.Id, StatusKey)
// Check that the service's status was updated accordingly
rtest.RequireStatusCondition(suite.T(), res, StatusKey, ConditionManaged)
// Check that the endpoints resource exists and contains 0 endpoints
endpointsID := rtest.Resource(types.ServiceEndpointsType, "api").ID()
endpoints := suite.client.RequireResourceExists(suite.T(), endpointsID)
suite.requireEndpoints(endpoints)
// Now add a workload that would be selected by the service. Leave
// the workload in a state where its health has not been reconciled
workload := rtest.Resource(types.WorkloadType, "api-1").
WithData(suite.T(), &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
Identity: "api",
}).
Write(suite.T(), suite.client)
// Wait for the endpoints to be regenerated
endpoints = suite.client.WaitForNewVersion(suite.T(), endpointsID, endpoints.Version)
// Verify that the generated endpoints now contain the workload
suite.requireEndpoints(endpoints, &pbcatalog.Endpoint{
TargetRef: workload.Id,
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_CRITICAL,
})
// Update the health status of the workload
suite.client.WriteStatus(suite.ctx, &pbresource.WriteStatusRequest{
Id: workload.Id,
Key: workloadhealth.StatusKey,
Status: &pbresource.Status{
ObservedGeneration: workload.Generation,
Conditions: []*pbresource.Condition{
{
Type: workloadhealth.StatusConditionHealthy,
State: pbresource.Condition_STATE_TRUE,
Reason: "HEALTH_PASSING",
},
},
},
})
// Wait for the endpoints to be regenerated
endpoints = suite.client.WaitForNewVersion(suite.T(), endpointsID, endpoints.Version)
// ensure the endpoint was put into the passing state
suite.requireEndpoints(endpoints, &pbcatalog.Endpoint{
TargetRef: workload.Id,
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
})
// rewrite the service to add more selection criteria. This should trigger
// reconciliation but shouldn't result in updating the endpoints because
// the actual list of currently selected workloads has not changed
rtest.Resource(types.ServiceType, "api").
WithData(suite.T(), &pbcatalog.Service{
Workloads: &pbcatalog.WorkloadSelector{
Prefixes: []string{"api-"},
Names: []string{"doesnt-matter"},
},
Ports: []*pbcatalog.ServicePort{
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
// Wait for the service status' observed generation to get bumped
service = suite.client.WaitForReconciliation(suite.T(), service.Id, StatusKey)
// Verify that the endpoints were not regenerated
suite.client.RequireVersionUnchanged(suite.T(), endpointsID, endpoints.Version)
// Delete the endpoints. The controller should bring these back momentarily
suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: endpointsID})
// Wait for controller to recreate the endpoints
retry.Run(suite.T(), func(r *retry.R) {
suite.client.RequireResourceExists(r, endpointsID)
})
// Move the service to having unmanaged endpoints
rtest.Resource(types.ServiceType, "api").
WithData(suite.T(), &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
}).
Write(suite.T(), suite.client)
res = suite.client.WaitForReconciliation(suite.T(), service.Id, StatusKey)
rtest.RequireStatusCondition(suite.T(), res, StatusKey, ConditionUnmanaged)
// Verify that the endpoints were deleted
suite.client.RequireResourceNotFound(suite.T(), endpointsID)
}
func TestController(t *testing.T) {
suite.Run(t, new(controllerSuite))
}

View File

@ -0,0 +1,177 @@
package endpoints
import (
"context"
"sort"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
"github.com/hashicorp/consul/proto-public/pbresource"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type serviceData struct {
resource *pbresource.Resource
service *pbcatalog.Service
}
type endpointsData struct {
resource *pbresource.Resource
endpoints *pbcatalog.ServiceEndpoints
}
type workloadData struct {
resource *pbresource.Resource
workload *pbcatalog.Workload
}
// getServiceData will read the service with the given ID and unmarshal the
// Data field. The return value is a struct that contains the retrieved
// resource as well as the unmarshalled form. If the resource doesn't
// exist, nil will be returned. Any other error, either with retrieving
// the resource or unmarshalling it, will be returned to the caller.
func getServiceData(ctx context.Context, rt controller.Runtime, id *pbresource.ID) (*serviceData, error) {
rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: id})
switch {
case status.Code(err) == codes.NotFound:
return nil, nil
case err != nil:
return nil, err
}
var service pbcatalog.Service
err = rsp.Resource.Data.UnmarshalTo(&service)
if err != nil {
return nil, resource.NewErrDataParse(&service, err)
}
return &serviceData{resource: rsp.Resource, service: &service}, nil
}
// getEndpointsData will read the endpoints with the given ID and unmarshal the
// Data field. The return value is a struct that contains the retrieved
// resource as well as the unmarshalled form. If the resource doesn't
// exist, nil will be returned. Any other error, either with retrieving
// the resource or unmarshalling it, will be returned to the caller.
func getEndpointsData(ctx context.Context, rt controller.Runtime, id *pbresource.ID) (*endpointsData, error) {
rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: id})
switch {
case status.Code(err) == codes.NotFound:
return nil, nil
case err != nil:
return nil, err
}
var endpoints pbcatalog.ServiceEndpoints
err = rsp.Resource.Data.UnmarshalTo(&endpoints)
if err != nil {
return nil, resource.NewErrDataParse(&endpoints, err)
}
return &endpointsData{resource: rsp.Resource, endpoints: &endpoints}, nil
}
// getWorkloadData will retrieve all workloads for the given service's selector
// and unmarshal them, returning a slice of objects holding both the resource and
// unmarshalled forms. Unmarshalling errors, or other resource service errors,
// will be returned to the caller.
func getWorkloadData(ctx context.Context, rt controller.Runtime, svc *serviceData) ([]*workloadData, error) {
workloadResources, err := gatherWorkloadsForService(ctx, rt, svc)
if err != nil {
return nil, err
}
var results []*workloadData
for _, res := range workloadResources {
var workload pbcatalog.Workload
err = res.Data.UnmarshalTo(&workload)
if err != nil {
return nil, resource.NewErrDataParse(&workload, err)
}
results = append(results, &workloadData{resource: res, workload: &workload})
}
return results, nil
}
// gatherWorkloadsForService will retrieve all the unique workloads for a given selector.
// NotFound errors for workloads selected by Name will be ignored. Any other
// resource service errors will be returned to the caller. Prior to returning
// the slice of resources, they will be sorted by name. The consistent ordering
// will allow callers to diff two versions of the data to determine if anything
// has changed but it also will make testing a little easier.
func gatherWorkloadsForService(ctx context.Context, rt controller.Runtime, svc *serviceData) ([]*pbresource.Resource, error) {
var workloads []*pbresource.Resource
sel := svc.service.GetWorkloads()
// This map tracks all the gathered workloads by name. It is mainly used to deduplicate
// workloads if they are specified multiple times throughout the list of selection criteria.
workloadNames := make(map[string]struct{})
// First gather all the prefix-matched workloads. We could do this second, but by doing
// it first it's possible we can avoid some resource service calls to read individual
// workloads selected by name if they are also matched by a prefix.
for _, prefix := range sel.GetPrefixes() {
rsp, err := rt.Client.List(ctx, &pbresource.ListRequest{
Type: types.WorkloadType,
Tenancy: svc.resource.Id.Tenancy,
NamePrefix: prefix,
})
if err != nil {
return nil, err
}
// append all workloads in the list response to our list of all selected workloads
for _, workload := range rsp.Resources {
// ignore duplicate workloads
if _, found := workloadNames[workload.Id.Name]; !found {
workloads = append(workloads, workload)
workloadNames[workload.Id.Name] = struct{}{}
}
}
}
// Now gather the exact match selections
for _, name := range sel.GetNames() {
// ignore names we have already fetched
if _, found := workloadNames[name]; found {
continue
}
workloadID := &pbresource.ID{
Type: types.WorkloadType,
Tenancy: svc.resource.Id.Tenancy,
Name: name,
}
rsp, err := rt.Client.Read(ctx, &pbresource.ReadRequest{Id: workloadID})
switch {
case status.Code(err) == codes.NotFound:
// Ignore not found errors as services may select workloads that do not
// yet exist. This is not considered an error state or mis-configuration
// as the user could be getting ready to add the workloads.
continue
case err != nil:
return nil, err
}
workloads = append(workloads, rsp.Resource)
workloadNames[rsp.Resource.Id.Name] = struct{}{}
}
// Sorting ensures deterministic output. This will help for testing but
// the real reason to do this is so we will be able to diff the set of
// workload endpoints to determine if we need to update them.
sort.Slice(workloads, func(i, j int) bool {
return workloads[i].Id.Name < workloads[j].Id.Name
})
return workloads, nil
}
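To make the deduplication and ordering concrete, a sketch using the selector from the reconciliation data tests below (all of these workloads except "not-found" are assumed to exist):
sel := &pbcatalog.WorkloadSelector{
	Prefixes: []string{"api-"},
	Names:    []string{"api-1", "web-2", "web-1", "api-1", "not-found"},
}
// With workloads api-1, api-2, api-123, web-1 and web-2 written, the gathered
// result is deduplicated and sorted by name: api-1, api-123, api-2, web-1, web-2.
// "api-1" appears once even though it matches both a prefix and a name, and the
// missing "not-found" workload is skipped rather than treated as an error.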

View File

@ -0,0 +1,260 @@
package endpoints
import (
"context"
"testing"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type reconciliationDataSuite struct {
suite.Suite
ctx context.Context
client pbresource.ResourceServiceClient
rt controller.Runtime
apiServiceData *pbcatalog.Service
apiService *pbresource.Resource
apiEndpoints *pbresource.Resource
api1Workload *pbresource.Resource
api2Workload *pbresource.Resource
api123Workload *pbresource.Resource
web1Workload *pbresource.Resource
web2Workload *pbresource.Resource
}
func (suite *reconciliationDataSuite) SetupTest() {
suite.ctx = testutil.TestContext(suite.T())
suite.client = svctest.RunResourceService(suite.T(), types.Register)
suite.rt = controller.Runtime{
Client: suite.client,
Logger: testutil.Logger(suite.T()),
}
suite.apiServiceData = &pbcatalog.Service{
Workloads: &pbcatalog.WorkloadSelector{
// This service's selectors are specially crafted to exercise both the
// deduplication and sorting behaviors of gatherWorkloadsForService.
Prefixes: []string{"api-"},
Names: []string{"api-1", "web-2", "web-1", "api-1", "not-found"},
},
Ports: []*pbcatalog.ServicePort{
{
TargetPort: "http",
Protocol: pbcatalog.Protocol_PROTOCOL_HTTP,
},
},
}
suite.apiService = rtest.Resource(types.ServiceType, "api").
WithData(suite.T(), suite.apiServiceData).
Write(suite.T(), suite.client)
suite.api1Workload = rtest.Resource(types.WorkloadType, "api-1").
WithData(suite.T(), &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1"},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
Identity: "api",
}).
Write(suite.T(), suite.client)
suite.api2Workload = rtest.Resource(types.WorkloadType, "api-2").
WithData(suite.T(), &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1"},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
Identity: "api",
}).
Write(suite.T(), suite.client)
suite.api123Workload = rtest.Resource(types.WorkloadType, "api-123").
WithData(suite.T(), &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1"},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
Identity: "api",
}).
Write(suite.T(), suite.client)
suite.web1Workload = rtest.Resource(types.WorkloadType, "web-1").
WithData(suite.T(), &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1"},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
Identity: "web",
}).
Write(suite.T(), suite.client)
suite.web2Workload = rtest.Resource(types.WorkloadType, "web-2").
WithData(suite.T(), &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1"},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
Identity: "web",
}).
Write(suite.T(), suite.client)
suite.apiEndpoints = rtest.Resource(types.ServiceEndpointsType, "api").
WithData(suite.T(), &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
TargetRef: rtest.Resource(types.WorkloadType, "api-1").ID(),
Addresses: []*pbcatalog.WorkloadAddress{
{
Host: "127.0.0.1",
Ports: []string{"http"},
},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
},
},
}).
Write(suite.T(), suite.client)
}
func (suite *reconciliationDataSuite) TestGetServiceData_NotFound() {
// This test's purpose is to ensure that NotFound errors when retrieving
// the service data are ignored properly.
data, err := getServiceData(suite.ctx, suite.rt, rtest.Resource(types.ServiceType, "not-found").ID())
require.NoError(suite.T(), err)
require.Nil(suite.T(), data)
}
func (suite *reconciliationDataSuite) TestGetServiceData_ReadError() {
// This test's purpose is to ensure that Read errors other than NotFound
// are propagated back to the caller. Specifying a resource ID with an
// unregistered type is the easiest way to force a resource service error.
badType := &pbresource.Type{
Group: "not",
Kind: "found",
GroupVersion: "vfake",
}
data, err := getServiceData(suite.ctx, suite.rt, rtest.Resource(badType, "foo").ID())
require.Error(suite.T(), err)
require.Equal(suite.T(), codes.InvalidArgument, status.Code(err))
require.Nil(suite.T(), data)
}
func (suite *reconciliationDataSuite) TestGetServiceData_UnmarshalError() {
// This test's purpose is to ensure that unmarshalling errors are returned
// to the caller. We are using a resource id that points to an endpoints
// object instead of a service to ensure that the data cannot be unmarshalled as a Service.
data, err := getServiceData(suite.ctx, suite.rt, rtest.Resource(types.ServiceEndpointsType, "api").ID())
require.Error(suite.T(), err)
var parseErr resource.ErrDataParse
require.ErrorAs(suite.T(), err, &parseErr)
require.Nil(suite.T(), data)
}
func (suite *reconciliationDataSuite) TestGetServiceData_Ok() {
// This test's purpose is to ensure that the happy path for
// retrieving a service works as expected.
data, err := getServiceData(suite.ctx, suite.rt, suite.apiService.Id)
require.NoError(suite.T(), err)
require.NotNil(suite.T(), data)
require.NotNil(suite.T(), data.resource)
prototest.AssertDeepEqual(suite.T(), suite.apiService.Id, data.resource.Id)
require.Len(suite.T(), data.service.Ports, 1)
}
func (suite *reconciliationDataSuite) TestGetEndpointsData_NotFound() {
// This test's purpose is to ensure that NotFound errors when retrieving
// the endpoint data are ignored properly.
data, err := getEndpointsData(suite.ctx, suite.rt, rtest.Resource(types.ServiceEndpointsType, "not-found").ID())
require.NoError(suite.T(), err)
require.Nil(suite.T(), data)
}
func (suite *reconciliationDataSuite) TestGetEndpointsData_ReadError() {
// This test's purpose is to ensure that Read errors other than NotFound
// are propagated back to the caller. Specifying a resource ID with an
// unregistered type is the easiest way to force a resource service error.
badType := &pbresource.Type{
Group: "not",
Kind: "found",
GroupVersion: "vfake",
}
data, err := getEndpointsData(suite.ctx, suite.rt, rtest.Resource(badType, "foo").ID())
require.Error(suite.T(), err)
require.Equal(suite.T(), codes.InvalidArgument, status.Code(err))
require.Nil(suite.T(), data)
}
func (suite *reconciliationDataSuite) TestGetEndpointsData_UnmarshalError() {
// This test's purpose is to ensure that unmarshalling errors are returned
// to the caller. We are using a resource id that points to a service object
// instead of an endpoints object so that the data cannot be unmarshalled as ServiceEndpoints.
data, err := getEndpointsData(suite.ctx, suite.rt, rtest.Resource(types.ServiceType, "api").ID())
require.Error(suite.T(), err)
var parseErr resource.ErrDataParse
require.ErrorAs(suite.T(), err, &parseErr)
require.Nil(suite.T(), data)
}
func (suite *reconciliationDataSuite) TestGetEndpointsData_Ok() {
// This test's purpose is to ensure that the happy path for
// retrieving an endpoints object works as expected.
data, err := getEndpointsData(suite.ctx, suite.rt, suite.apiEndpoints.Id)
require.NoError(suite.T(), err)
require.NotNil(suite.T(), data)
require.NotNil(suite.T(), data.resource)
prototest.AssertDeepEqual(suite.T(), suite.apiEndpoints.Id, data.resource.Id)
require.Len(suite.T(), data.endpoints.Endpoints, 1)
}
func (suite *reconciliationDataSuite) TestGetWorkloadData() {
// This test's purpose is to ensure that gathering the workloads for
// a service works as expected. The service's selector was crafted
// to exercise the deduplication behavior as well as the sorting
// behavior. The assertions in this test will verify that only
// unique workloads are returned and that they are ordered.
data, err := getWorkloadData(suite.ctx, suite.rt, &serviceData{
resource: suite.apiService,
service: suite.apiServiceData,
})
require.NoError(suite.T(), err)
require.Len(suite.T(), data, 5)
prototest.AssertDeepEqual(suite.T(), suite.api1Workload, data[0].resource)
prototest.AssertDeepEqual(suite.T(), suite.api123Workload, data[1].resource)
prototest.AssertDeepEqual(suite.T(), suite.api2Workload, data[2].resource)
prototest.AssertDeepEqual(suite.T(), suite.web1Workload, data[3].resource)
prototest.AssertDeepEqual(suite.T(), suite.web2Workload, data[4].resource)
}
func TestReconciliationData(t *testing.T) {
suite.Run(t, new(reconciliationDataSuite))
}

View File

@ -0,0 +1,33 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package endpoints
import "github.com/hashicorp/consul/proto-public/pbresource"
const (
StatusKey = "consul.io/endpoint-manager"
StatusConditionEndpointsManaged = "EndpointsManaged"
StatusReasonSelectorNotFound = "SelectorNotFound"
StatusReasonSelectorFound = "SelectorFound"
SelectorFoundMessage = "A valid workload selector is present within the service."
SelectorNotFoundMessage = "Either the workload selector was not present or contained no selection criteria."
)
var (
ConditionManaged = &pbresource.Condition{
Type: StatusConditionEndpointsManaged,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonSelectorFound,
Message: SelectorFoundMessage,
}
ConditionUnmanaged = &pbresource.Condition{
Type: StatusConditionEndpointsManaged,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonSelectorNotFound,
Message: SelectorNotFoundMessage,
}
)
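The conditions above are what the endpoints controller writes into a Service's status. As a rough sketch (not part of this diff; res, rt, and ctx stand in for the arguments handed to Reconcile, and the resource service client wiring is assumed from the other controllers in this change), attaching the managed condition looks roughly like:

// Sketch only: write the "managed" condition onto the service's status.
newStatus := &pbresource.Status{
	ObservedGeneration: res.Generation,
	Conditions:         []*pbresource.Condition{ConditionManaged},
}
if _, err := rt.Client.WriteStatus(ctx, &pbresource.WriteStatusRequest{
	Id:     res.Id,
	Key:    StatusKey,
	Status: newStatus,
}); err != nil {
	rt.Logger.Error("error writing endpoints managed status", "error", err)
	return err
}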

View File

@ -50,22 +50,10 @@ func (r *nodeHealthReconciler) Reconcile(ctx context.Context, rt controller.Runt
return err return err
} }
message := NodeHealthyMessage
statusState := pbresource.Condition_STATE_TRUE
if health != pbcatalog.Health_HEALTH_PASSING {
statusState = pbresource.Condition_STATE_FALSE
message = NodeUnhealthyMessage
}
newStatus := &pbresource.Status{ newStatus := &pbresource.Status{
ObservedGeneration: res.Generation, ObservedGeneration: res.Generation,
Conditions: []*pbresource.Condition{ Conditions: []*pbresource.Condition{
{ Conditions[health],
Type: StatusConditionHealthy,
State: statusState,
Reason: health.String(),
Message: message,
},
}, },
} }

View File

@ -3,6 +3,11 @@
package nodehealth package nodehealth
import (
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
const ( const (
StatusKey = "consul.io/node-health" StatusKey = "consul.io/node-health"
StatusConditionHealthy = "healthy" StatusConditionHealthy = "healthy"
@ -10,3 +15,40 @@ const (
NodeHealthyMessage = "All node health checks are passing" NodeHealthyMessage = "All node health checks are passing"
NodeUnhealthyMessage = "One or more node health checks are not passing" NodeUnhealthyMessage = "One or more node health checks are not passing"
) )
var (
ConditionPassing = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_TRUE,
Reason: pbcatalog.Health_HEALTH_PASSING.String(),
Message: NodeHealthyMessage,
}
ConditionWarning = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_WARNING.String(),
Message: NodeUnhealthyMessage,
}
ConditionCritical = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_CRITICAL.String(),
Message: NodeUnhealthyMessage,
}
ConditionMaintenance = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_MAINTENANCE.String(),
Message: NodeUnhealthyMessage,
}
Conditions = map[pbcatalog.Health]*pbresource.Condition{
pbcatalog.Health_HEALTH_PASSING: ConditionPassing,
pbcatalog.Health_HEALTH_WARNING: ConditionWarning,
pbcatalog.Health_HEALTH_CRITICAL: ConditionCritical,
pbcatalog.Health_HEALTH_MAINTENANCE: ConditionMaintenance,
}
)

View File

@ -4,6 +4,7 @@
package controllers package controllers
import ( import (
"github.com/hashicorp/consul/internal/catalog/internal/controllers/endpoints"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/nodehealth" "github.com/hashicorp/consul/internal/catalog/internal/controllers/nodehealth"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth" "github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth"
"github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/controller"
@ -11,9 +12,11 @@ import (
type Dependencies struct { type Dependencies struct {
WorkloadHealthNodeMapper workloadhealth.NodeMapper WorkloadHealthNodeMapper workloadhealth.NodeMapper
EndpointsWorkloadMapper endpoints.WorkloadMapper
} }
func Register(mgr *controller.Manager, deps Dependencies) { func Register(mgr *controller.Manager, deps Dependencies) {
mgr.Register(nodehealth.NodeHealthController()) mgr.Register(nodehealth.NodeHealthController())
mgr.Register(workloadhealth.WorkloadHealthController(deps.WorkloadHealthNodeMapper)) mgr.Register(workloadhealth.WorkloadHealthController(deps.WorkloadHealthNodeMapper))
mgr.Register(endpoints.ServiceEndpointsController(deps.EndpointsWorkloadMapper))
} }

View File

@ -58,7 +58,7 @@ type workloadHealthReconciler struct {
} }
func (r *workloadHealthReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error { func (r *workloadHealthReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
// The runtime is passed by value so replacing it here for the remaineder of this // The runtime is passed by value so replacing it here for the remainder of this
// reconciliation request processing will not affect future invocations. // reconciliation request processing will not affect future invocations.
rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", StatusKey) rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", StatusKey)
@ -90,6 +90,12 @@ func (r *workloadHealthReconciler) Reconcile(ctx context.Context, rt controller.
if workload.NodeName != "" { if workload.NodeName != "" {
nodeID := r.nodeMap.NodeIDFromWorkload(res, &workload) nodeID := r.nodeMap.NodeIDFromWorkload(res, &workload)
r.nodeMap.TrackWorkload(res.Id, nodeID) r.nodeMap.TrackWorkload(res.Id, nodeID)
// It is important that getting the nodes health happens after tracking the
// Workload with the node mapper. If the order were reversed we could
// potentially miss events for data that changes after we read the node but
// before we configured the node mapper to map subsequent events to this
// workload.
nodeHealth, err = getNodeHealth(ctx, rt, nodeID) nodeHealth, err = getNodeHealth(ctx, rt, nodeID)
if err != nil { if err != nil {
rt.Logger.Error("error looking up node health", "error", err, "node-id", nodeID) rt.Logger.Error("error looking up node health", "error", err, "node-id", nodeID)
@ -114,33 +120,15 @@ func (r *workloadHealthReconciler) Reconcile(ctx context.Context, rt controller.
health = workloadHealth health = workloadHealth
} }
statusState := pbresource.Condition_STATE_TRUE condition := WorkloadConditions[workloadHealth]
if health != pbcatalog.Health_HEALTH_PASSING {
statusState = pbresource.Condition_STATE_FALSE
}
message := WorkloadHealthyMessage
if workload.NodeName != "" { if workload.NodeName != "" {
message = NodeAndWorkloadHealthyMessage condition = NodeAndWorkloadConditions[workloadHealth][nodeHealth]
}
switch {
case workloadHealth != pbcatalog.Health_HEALTH_PASSING && nodeHealth != pbcatalog.Health_HEALTH_PASSING:
message = NodeAndWorkloadUnhealthyMessage
case workloadHealth != pbcatalog.Health_HEALTH_PASSING:
message = WorkloadUnhealthyMessage
case nodeHealth != pbcatalog.Health_HEALTH_PASSING:
message = nodehealth.NodeUnhealthyMessage
} }
newStatus := &pbresource.Status{ newStatus := &pbresource.Status{
ObservedGeneration: res.Generation, ObservedGeneration: res.Generation,
Conditions: []*pbresource.Condition{ Conditions: []*pbresource.Condition{
{ condition,
Type: StatusConditionHealthy,
State: statusState,
Reason: health.String(),
Message: message,
},
}, },
} }

View File

@ -1,5 +1,11 @@
package workloadhealth package workloadhealth
import (
"github.com/hashicorp/consul/internal/catalog/internal/controllers/nodehealth"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
const ( const (
StatusKey = "consul.io/workload-health" StatusKey = "consul.io/workload-health"
StatusConditionHealthy = "healthy" StatusConditionHealthy = "healthy"
@ -9,3 +15,122 @@ const (
NodeAndWorkloadUnhealthyMessage = "One or more workload and node health checks are not passing" NodeAndWorkloadUnhealthyMessage = "One or more workload and node health checks are not passing"
WorkloadUnhealthyMessage = "One or more workload health checks are not passing" WorkloadUnhealthyMessage = "One or more workload health checks are not passing"
) )
var (
ConditionWorkloadPassing = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_TRUE,
Reason: pbcatalog.Health_HEALTH_PASSING.String(),
Message: WorkloadHealthyMessage,
}
ConditionWorkloadWarning = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_WARNING.String(),
Message: WorkloadUnhealthyMessage,
}
ConditionWorkloadCritical = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_CRITICAL.String(),
Message: WorkloadUnhealthyMessage,
}
ConditionWorkloadMaintenance = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_MAINTENANCE.String(),
Message: WorkloadUnhealthyMessage,
}
ConditionNodeAndWorkloadPassing = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_TRUE,
Reason: pbcatalog.Health_HEALTH_PASSING.String(),
Message: NodeAndWorkloadHealthyMessage,
}
ConditionNodeAndWorkloadWarning = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_WARNING.String(),
Message: NodeAndWorkloadUnhealthyMessage,
}
ConditionNodeAndWorkloadCritical = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_CRITICAL.String(),
Message: NodeAndWorkloadUnhealthyMessage,
}
ConditionNodeAndWorkloadMaintenance = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_MAINTENANCE.String(),
Message: NodeAndWorkloadUnhealthyMessage,
}
ConditionNodeWarning = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_WARNING.String(),
Message: nodehealth.NodeUnhealthyMessage,
}
ConditionNodeCritical = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_CRITICAL.String(),
Message: nodehealth.NodeUnhealthyMessage,
}
ConditionNodeMaintenance = &pbresource.Condition{
Type: StatusConditionHealthy,
State: pbresource.Condition_STATE_FALSE,
Reason: pbcatalog.Health_HEALTH_MAINTENANCE.String(),
Message: nodehealth.NodeUnhealthyMessage,
}
// WorkloadConditions is a map of the workloadhealth to the status condition
// used to represent that health.
WorkloadConditions = map[pbcatalog.Health]*pbresource.Condition{
pbcatalog.Health_HEALTH_PASSING: ConditionWorkloadPassing,
pbcatalog.Health_HEALTH_WARNING: ConditionWorkloadWarning,
pbcatalog.Health_HEALTH_CRITICAL: ConditionWorkloadCritical,
pbcatalog.Health_HEALTH_MAINTENANCE: ConditionWorkloadMaintenance,
}
// NodeAndWorkloadConditions is a map whose ultimate values are the status conditions
// used to represent the combined health of a workload and its associated node.
// The outer map's keys are the workload's health and the inner maps' keys are the node's
// health.
NodeAndWorkloadConditions = map[pbcatalog.Health]map[pbcatalog.Health]*pbresource.Condition{
pbcatalog.Health_HEALTH_PASSING: {
pbcatalog.Health_HEALTH_PASSING: ConditionNodeAndWorkloadPassing,
pbcatalog.Health_HEALTH_WARNING: ConditionNodeWarning,
pbcatalog.Health_HEALTH_CRITICAL: ConditionNodeCritical,
pbcatalog.Health_HEALTH_MAINTENANCE: ConditionNodeMaintenance,
},
pbcatalog.Health_HEALTH_WARNING: {
pbcatalog.Health_HEALTH_PASSING: ConditionWorkloadWarning,
pbcatalog.Health_HEALTH_WARNING: ConditionNodeAndWorkloadWarning,
pbcatalog.Health_HEALTH_CRITICAL: ConditionNodeAndWorkloadCritical,
pbcatalog.Health_HEALTH_MAINTENANCE: ConditionNodeAndWorkloadMaintenance,
},
pbcatalog.Health_HEALTH_CRITICAL: {
pbcatalog.Health_HEALTH_PASSING: ConditionWorkloadCritical,
pbcatalog.Health_HEALTH_WARNING: ConditionNodeAndWorkloadCritical,
pbcatalog.Health_HEALTH_CRITICAL: ConditionNodeAndWorkloadCritical,
pbcatalog.Health_HEALTH_MAINTENANCE: ConditionNodeAndWorkloadMaintenance,
},
pbcatalog.Health_HEALTH_MAINTENANCE: {
pbcatalog.Health_HEALTH_PASSING: ConditionWorkloadMaintenance,
pbcatalog.Health_HEALTH_WARNING: ConditionNodeAndWorkloadMaintenance,
pbcatalog.Health_HEALTH_CRITICAL: ConditionNodeAndWorkloadMaintenance,
pbcatalog.Health_HEALTH_MAINTENANCE: ConditionNodeAndWorkloadMaintenance,
},
}
)
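To make the lookup order concrete, a small sketch (names taken from the reconciler above, nothing new): the workload's own health selects the outer entry and, when the workload is placed on a node, the node's health selects the inner one.

// Sketch: workloadHealth and nodeHealth are computed earlier in Reconcile.
condition := WorkloadConditions[workloadHealth]
if workload.NodeName != "" {
	// Node-placed workloads blend node health into the reported condition.
	condition = NodeAndWorkloadConditions[workloadHealth][nodeHealth]
}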

View File

@ -0,0 +1,163 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package selectiontracker
import (
"context"
"sync"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/radix"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/lib/stringslice"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
type WorkloadSelectionTracker struct {
lock sync.Mutex
prefixes *radix.Tree[[]controller.Request]
exact *radix.Tree[[]controller.Request]
// workloadSelectors contains a map keyed on resource names with values
// being the selector that resource is currently associated with. This map
// is kept mainly to make tracking removal operations more efficient.
// Generally any operation that could take advantage of knowing where
// in the trees the resource id is referenced can use this to avoid
// searching the whole tree.
workloadSelectors map[string]*pbcatalog.WorkloadSelector
}
func New() *WorkloadSelectionTracker {
return &WorkloadSelectionTracker{
prefixes: radix.New[[]controller.Request](),
exact: radix.New[[]controller.Request](),
workloadSelectors: make(map[string]*pbcatalog.WorkloadSelector),
}
}
// MapWorkload will return a slice of controller.Requests with one request for
// each resource that selects the specified Workload resource.
func (t *WorkloadSelectionTracker) MapWorkload(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
t.lock.Lock()
defer t.lock.Unlock()
var reqs []controller.Request
// gather the list of all resources that select the specified workload using a prefix match
t.prefixes.WalkPath(res.Id.Name, func(path string, requests []controller.Request) bool {
reqs = append(reqs, requests...)
return false
})
// gather the list of all resources that select the specified workload using an exact match
exactReqs, _ := t.exact.Get(res.Id.Name)
// return the combined list of all resources that select the specified workload
return append(reqs, exactReqs...), nil
}
// TrackIDForSelector will associate workloads matching the specified workload
// selector with the given resource id.
func (t *WorkloadSelectionTracker) TrackIDForSelector(id *pbresource.ID, selector *pbcatalog.WorkloadSelector) {
t.lock.Lock()
defer t.lock.Unlock()
if previousSelector, found := t.workloadSelectors[id.Name]; found {
if stringslice.Equal(previousSelector.Names, selector.Names) &&
stringslice.Equal(previousSelector.Prefixes, selector.Prefixes) {
// the selector is unchanged so do nothing
return
}
// Potentially we could detect differences and do more minimal work. However,
// users are not expected to alter workload selectors often, so not optimizing
// this further is probably fine. Instead we wipe all tracking of the id and
// reinsert things.
t.untrackID(id)
}
// loop over all the exact matching rules and associate those workload names
// with the given resource id
for _, name := range selector.GetNames() {
// lookup any resource id associations for the given workload name
leaf, _ := t.exact.Get(name)
// append the ID to the existing request list
t.exact.Insert(name, append(leaf, controller.Request{ID: id}))
}
// loop over all the prefix matching rules and associate those prefixes
// with the given resource id.
for _, prefix := range selector.GetPrefixes() {
// lookup any resource id associations for the given workload name prefix
leaf, _ := t.prefixes.Get(prefix)
// append the new resource ID to the existing request list
t.prefixes.Insert(prefix, append(leaf, controller.Request{ID: id}))
}
t.workloadSelectors[id.Name] = selector
}
// UntrackID causes the tracker to stop tracking the given resource ID
func (t *WorkloadSelectionTracker) UntrackID(id *pbresource.ID) {
t.lock.Lock()
defer t.lock.Unlock()
t.untrackID(id)
}
// untrackID should be called to stop tracking a resource ID.
// This method assumes the lock is already held. Besides modifying
// the prefix & name trees to not reference this ID, it will also
// delete any corresponding entry within the workloadSelectors map.
func (t *WorkloadSelectionTracker) untrackID(id *pbresource.ID) {
selector, found := t.workloadSelectors[id.Name]
if !found {
return
}
removeIDFromTreeAtPaths(t.exact, id, selector.Names)
removeIDFromTreeAtPaths(t.prefixes, id, selector.Prefixes)
// If we don't do this deletion then reinsertion of the id for
// tracking in the future could prevent selection criteria from
// being properly inserted into the radix trees.
delete(t.workloadSelectors, id.Name)
}
// removeIDFromTree will remove the given resource ID from all leaf nodes in the radix tree.
func removeIDFromTreeAtPaths(t *radix.Tree[[]controller.Request], id *pbresource.ID, paths []string) {
for _, path := range paths {
requests, _ := t.Get(path)
foundIdx := -1
for idx, req := range requests {
if resource.EqualID(req.ID, id) {
foundIdx = idx
break
}
}
if foundIdx != -1 {
l := len(requests)
if l == 1 {
requests = nil
} else if foundIdx == l-1 {
requests = requests[:foundIdx]
} else if foundIdx == 0 {
requests = requests[1:]
} else {
requests = append(requests[:foundIdx], requests[foundIdx+1:]...)
}
if len(requests) > 0 {
t.Insert(path, requests)
} else {
t.Delete(path)
}
}
}
}
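For orientation, a brief usage sketch of the tracker (illustrative only; endpointsID, workloadRes, ctx, and rt are placeholders, and the endpoints controller performs the real wiring):

// Sketch: associate an endpoints ID with a selector, then map a workload
// change back to the IDs that select it.
tracker := New()
tracker.TrackIDForSelector(endpointsID, &pbcatalog.WorkloadSelector{
	Prefixes: []string{"api-"},
	Names:    []string{"web-1"},
})

// Invoked by the controller's Workload watch:
reqs, err := tracker.MapWorkload(ctx, rt, workloadRes)
if err == nil {
	for _, req := range reqs {
		_ = req.ID // each ID identifies a resource that selects the changed workload
	}
}

// Once the owning resource goes away, drop the association:
tracker.UntrackID(endpointsID)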

View File

@ -0,0 +1,275 @@
package selectiontracker
import (
"context"
"testing"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/radix"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
var (
workloadData = &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{
{
Host: "198.18.0.1",
},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {
Port: 8080,
Protocol: pbcatalog.Protocol_PROTOCOL_HTTP,
},
},
}
)
func TestRemoveIDFromTreeAtPaths(t *testing.T) {
tree := radix.New[[]controller.Request]()
toRemove := rtest.Resource(types.ServiceEndpointsType, "blah").ID()
other1 := rtest.Resource(types.ServiceEndpointsType, "other1").ID()
other2 := rtest.Resource(types.ServiceEndpointsType, "other2").ID()
// we are trying to create a tree such that removal of the toRemove id causes a
// few things to happen.
//
// * All the slice modification conditions are executed
// - removal from beginning of the list
// - removal from the end of the list
// - removal of only element in the list
// - removal from middle of the list
// * Paths without matching ids are ignored
notMatching := []controller.Request{
{ID: other1},
{ID: other2},
}
matchAtBeginning := []controller.Request{
{ID: toRemove},
{ID: other1},
{ID: other2},
}
matchAtEnd := []controller.Request{
{ID: other1},
{ID: other2},
{ID: toRemove},
}
matchInMiddle := []controller.Request{
{ID: other1},
{ID: toRemove},
{ID: other2},
}
matchOnly := []controller.Request{
{ID: toRemove},
}
tree.Insert("no-match", notMatching)
tree.Insert("match-beginning", matchAtBeginning)
tree.Insert("match-end", matchAtEnd)
tree.Insert("match-middle", matchInMiddle)
tree.Insert("match-only", matchOnly)
removeIDFromTreeAtPaths(tree, toRemove, []string{
"no-match",
"match-beginning",
"match-end",
"match-middle",
"match-only",
})
reqs, found := tree.Get("no-match")
require.True(t, found)
require.Equal(t, notMatching, reqs)
reqs, found = tree.Get("match-beginning")
require.True(t, found)
require.Equal(t, notMatching, reqs)
reqs, found = tree.Get("match-end")
require.True(t, found)
require.Equal(t, notMatching, reqs)
reqs, found = tree.Get("match-middle")
require.True(t, found)
require.Equal(t, notMatching, reqs)
// The last tracked request should cause removal from the tree
_, found = tree.Get("match-only")
require.False(t, found)
}
type selectionTrackerSuite struct {
suite.Suite
rt controller.Runtime
tracker *WorkloadSelectionTracker
workloadAPI1 *pbresource.Resource
workloadWeb1 *pbresource.Resource
endpointsFoo *pbresource.ID
endpointsBar *pbresource.ID
}
func (suite *selectionTrackerSuite) SetupTest() {
suite.tracker = New()
suite.workloadAPI1 = rtest.Resource(types.WorkloadType, "api-1").WithData(suite.T(), workloadData).Build()
suite.workloadWeb1 = rtest.Resource(types.WorkloadType, "web-1").WithData(suite.T(), workloadData).Build()
suite.endpointsFoo = rtest.Resource(types.ServiceEndpointsType, "foo").ID()
suite.endpointsBar = rtest.Resource(types.ServiceEndpointsType, "bar").ID()
}
func (suite *selectionTrackerSuite) requireMappedIDs(workload *pbresource.Resource, ids ...*pbresource.ID) {
suite.T().Helper()
reqs, err := suite.tracker.MapWorkload(context.Background(), suite.rt, workload)
require.NoError(suite.T(), err)
require.Len(suite.T(), reqs, len(ids))
for _, id := range ids {
prototest.AssertContainsElement(suite.T(), reqs, controller.Request{ID: id})
}
}
func (suite *selectionTrackerSuite) TestMapWorkload_Empty() {
// If we aren't tracking anything then the default mapping behavior
// should be to return an empty list of requests.
suite.requireMappedIDs(suite.workloadAPI1)
}
func (suite *selectionTrackerSuite) TestUntrackID_Empty() {
// this test has no assertions but mainly is here to prove that things
// don't explode if this is attempted.
suite.tracker.UntrackID(suite.endpointsFoo)
}
func (suite *selectionTrackerSuite) TestTrackAndMap_SingleResource_MultipleWorkloadMappings() {
// This test aims to prove that tracking a resource's workload selector and
// then mapping a workload back to that resource works as expected when the
// result set is a single resource. This test will ensure that both prefix
// and exact match criteria are handled correctly and that one resource
// can be mapped from multiple distinct workloads.
// associate the foo endpoints with some workloads
suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
Names: []string{"bar", "api", "web-1"},
Prefixes: []string{"api-"},
})
// Ensure that mappings tracked by prefix work.
suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo)
// Ensure that mappings tracked by exact match work.
suite.requireMappedIDs(suite.workloadWeb1, suite.endpointsFoo)
}
func (suite *selectionTrackerSuite) TestTrackAndMap_MultiResource_SingleWorkloadMapping() {
// This test aims to prove that multiple resources selecting a workload
// will result in multiple requests when mapping that workload.
// associate the foo endpoints with some workloads
suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
Prefixes: []string{"api-"},
})
// associate the bar endpoints with some workloads
suite.tracker.TrackIDForSelector(suite.endpointsBar, &pbcatalog.WorkloadSelector{
Names: []string{"api-1"},
})
// now the mapping should return both endpoints resource ids
suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo, suite.endpointsBar)
}
func (suite *selectionTrackerSuite) TestDuplicateTracking() {
// This test aims to prove that tracking some ID multiple times doesn't
// result in multiple requests for the same ID
// associate the foo endpoints with some workloads 3 times without changing
// the selection criteria. The second and third calls should be no-ops.
suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
Prefixes: []string{"api-"},
})
suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
Prefixes: []string{"api-"},
})
suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
Prefixes: []string{"api-"},
})
// regardless of the number of times tracked we should only see a single request
suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo)
}
func (suite *selectionTrackerSuite) TestModifyTracking() {
// This test aims to prove that modifying selection criteria for a resource
// works as expected. Adding new criteria results in all being tracked.
// Removal of some criteria doesn't result in removal of all, etc. More or
// less we want to ensure that updating selection criteria leaves the
// tracker in a consistent/expected state.
// track the web-1 workload
suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
Names: []string{"web-1"},
})
// ensure that api-1 isn't mapped but web-1 is
suite.requireMappedIDs(suite.workloadAPI1)
suite.requireMappedIDs(suite.workloadWeb1, suite.endpointsFoo)
// now also track the api- prefix
suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
Names: []string{"web-1"},
Prefixes: []string{"api-"},
})
// ensure that both workloads are mapped appropriately
suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo)
suite.requireMappedIDs(suite.workloadWeb1, suite.endpointsFoo)
// now remove the web tracking
suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
Prefixes: []string{"api-"},
})
// ensure that only api-1 is mapped
suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo)
suite.requireMappedIDs(suite.workloadWeb1)
}
func (suite *selectionTrackerSuite) TestRemove() {
// This test aims to prove that removal of a resource from tracking
// actually prevents subsequent mapping calls from returning the
// untracked resource.
// track the web-1 workload
suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
Names: []string{"web-1"},
})
// ensure that api-1 isn't mapped but web-1 is
suite.requireMappedIDs(suite.workloadWeb1, suite.endpointsFoo)
// untrack the resource
suite.tracker.UntrackID(suite.endpointsFoo)
// ensure that we no longer map the previous workload to the resource
suite.requireMappedIDs(suite.workloadWeb1)
}
func TestWorkloadSelectionSuite(t *testing.T) {
suite.Run(t, new(selectionTrackerSuite))
}

View File

@ -60,3 +60,12 @@ type errTooMuchMesh struct {
func (err errTooMuchMesh) Error() string { func (err errTooMuchMesh) Error() string {
return fmt.Sprintf("protocol \"mesh\" was specified in more than 1 port: %+v", err.Ports) return fmt.Sprintf("protocol \"mesh\" was specified in more than 1 port: %+v", err.Ports)
} }
type errInvalidEndpointsOwnerName struct {
Name string
OwnerName string
}
func (err errInvalidEndpointsOwnerName) Error() string {
return fmt.Sprintf("invalid owner name %q. ServiceEndpoints objects must be owned by a Service with the same name: %q", err.OwnerName, err.Name)
}

View File

@ -52,6 +52,10 @@ func TestErrorStrings(t *testing.T) {
"errTooMuchMesh": errTooMuchMesh{ "errTooMuchMesh": errTooMuchMesh{
Ports: []string{"http", "grpc"}, Ports: []string{"http", "grpc"},
}, },
"errInvalidEndpointsOwnerName": errInvalidEndpointsOwnerName{
Name: "foo",
OwnerName: "bar",
},
"errNotDNSLabel": errNotDNSLabel, "errNotDNSLabel": errNotDNSLabel,
"errNotIPAddress": errNotIPAddress, "errNotIPAddress": errNotIPAddress,
"errUnixSocketMultiport": errUnixSocketMultiport, "errUnixSocketMultiport": errUnixSocketMultiport,

View File

@ -73,7 +73,7 @@ func ValidateHealthStatus(res *pbresource.Resource) error {
Wrapped: resource.ErrMissing, Wrapped: resource.ErrMissing,
}) })
} else if !resource.EqualType(res.Owner.Type, WorkloadType) && !resource.EqualType(res.Owner.Type, NodeType) { } else if !resource.EqualType(res.Owner.Type, WorkloadType) && !resource.EqualType(res.Owner.Type, NodeType) {
err = multierror.Append(err, resource.ErrOwnerInvalid{ResourceType: res.Id.Type, OwnerType: res.Owner.Type}) err = multierror.Append(err, resource.ErrOwnerTypeInvalid{ResourceType: res.Id.Type, OwnerType: res.Owner.Type})
} }
return err return err

View File

@ -203,11 +203,11 @@ func TestValidateHealthStatus_InvalidOwner(t *testing.T) {
res := createHealthStatusResource(t, data, tcase.owner) res := createHealthStatusResource(t, data, tcase.owner)
err := ValidateHealthStatus(res) err := ValidateHealthStatus(res)
require.Error(t, err) require.Error(t, err)
expected := resource.ErrOwnerInvalid{ expected := resource.ErrOwnerTypeInvalid{
ResourceType: HealthStatusType, ResourceType: HealthStatusType,
OwnerType: tcase.owner.Type, OwnerType: tcase.owner.Type,
} }
var actual resource.ErrOwnerInvalid var actual resource.ErrOwnerTypeInvalid
require.ErrorAs(t, err, &actual) require.ErrorAs(t, err, &actual)
require.Equal(t, expected, actual) require.Equal(t, expected, actual)
}) })

View File

@ -71,7 +71,7 @@ func ValidateService(res *pbresource.Resource) error {
}, },
}, },
}) })
} else { } else if port.VirtualPort != 0 {
usedVirtualPorts[port.VirtualPort] = idx usedVirtualPorts[port.VirtualPort] = idx
} }

View File

@ -30,10 +30,48 @@ func RegisterServiceEndpoints(r resource.Registry) {
r.Register(resource.Registration{ r.Register(resource.Registration{
Type: ServiceEndpointsV1Alpha1Type, Type: ServiceEndpointsV1Alpha1Type,
Proto: &pbcatalog.ServiceEndpoints{}, Proto: &pbcatalog.ServiceEndpoints{},
Validate: nil, Validate: ValidateServiceEndpoints,
Mutate: MutateServiceEndpoints,
}) })
} }
func MutateServiceEndpoints(res *pbresource.Resource) error {
if res.Owner == nil {
res.Owner = &pbresource.ID{
Type: ServiceV1Alpha1Type,
Tenancy: res.Id.Tenancy,
Name: res.Id.Name,
}
}
var err error
if !resource.EqualType(res.Owner.Type, ServiceV1Alpha1Type) {
err = multierror.Append(err, resource.ErrOwnerTypeInvalid{
ResourceType: ServiceEndpointsV1Alpha1Type,
OwnerType: res.Owner.Type,
})
}
if !resource.EqualTenancy(res.Owner.Tenancy, res.Id.Tenancy) {
err = multierror.Append(err, resource.ErrOwnerTenantInvalid{
ResourceTenancy: res.Id.Tenancy,
OwnerTenancy: res.Owner.Tenancy,
})
}
if res.Owner.Name != res.Id.Name {
err = multierror.Append(err, resource.ErrInvalidField{
Name: "name",
Wrapped: errInvalidEndpointsOwnerName{
Name: res.Id.Name,
OwnerName: res.Owner.Name,
},
})
}
return err
}
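As a quick illustration of the defaulting above (a sketch, not part of the diff; the tenancy values simply mirror the fixtures used elsewhere in this change): an endpoints resource written with no owner picks up a Service owner of the same name and tenancy, while a mismatched owner fails the checks that follow.

// Sketch: owner defaulting performed by MutateServiceEndpoints.
res := &pbresource.Resource{
	Id: &pbresource.ID{
		Type:    ServiceEndpointsV1Alpha1Type,
		Tenancy: &pbresource.Tenancy{Partition: "default", Namespace: "default", PeerName: "local"},
		Name:    "api",
	},
	// Owner deliberately left nil.
}
if err := MutateServiceEndpoints(res); err == nil {
	// res.Owner now references the "api" Service in the same tenancy.
}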
func ValidateServiceEndpoints(res *pbresource.Resource) error { func ValidateServiceEndpoints(res *pbresource.Resource) error {
var svcEndpoints pbcatalog.ServiceEndpoints var svcEndpoints pbcatalog.ServiceEndpoints

View File

@ -45,6 +45,18 @@ func TestValidateService_Ok(t *testing.T) {
VirtualPort: 42, VirtualPort: 42,
Protocol: pbcatalog.Protocol_PROTOCOL_HTTP, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP,
}, },
{
TargetPort: "other",
// leaving VirtualPort unset to verify that seeing
// a zero virtual port multiple times is fine.
Protocol: pbcatalog.Protocol_PROTOCOL_HTTP2,
},
{
TargetPort: "other2",
// leaving VirtualPort unset to verify that seeing
// a zero virtual port multiple times is fine.
Protocol: pbcatalog.Protocol_PROTOCOL_GRPC,
},
}, },
VirtualIps: []string{"198.18.0.1"}, VirtualIps: []string{"198.18.0.1"},
} }

View File

@ -0,0 +1 @@
invalid owner name "bar". ServiceEndpoints objects must be owned by a Service with the same name: "foo"

View File

@ -152,46 +152,6 @@ type Reconciler interface {
Reconcile(ctx context.Context, rt Runtime, req Request) error Reconcile(ctx context.Context, rt Runtime, req Request) error
} }
// DependencyMapper is called when a dependency watched via WithWatch is changed
// to determine which of the controller's managed resources need to be reconciled.
type DependencyMapper func(
ctx context.Context,
rt Runtime,
res *pbresource.Resource,
) ([]Request, error)
// MapOwner implements a DependencyMapper that returns the updated resource's owner.
func MapOwner(_ context.Context, _ Runtime, res *pbresource.Resource) ([]Request, error) {
var reqs []Request
if res.Owner != nil {
reqs = append(reqs, Request{ID: res.Owner})
}
return reqs, nil
}
func MapOwnerFiltered(filter *pbresource.Type) DependencyMapper {
return func(_ context.Context, _ Runtime, res *pbresource.Resource) ([]Request, error) {
if res.Owner == nil {
return nil, nil
}
ownerType := res.Owner.GetType()
if ownerType.Group != filter.Group {
return nil, nil
}
if ownerType.GroupVersion != filter.GroupVersion {
return nil, nil
}
if ownerType.Kind != filter.Kind {
return nil, nil
}
return []Request{{ID: res.Owner}}, nil
}
}
// Placement determines where and how many replicas of the controller will run. // Placement determines where and how many replicas of the controller will run.
type Placement int type Placement int

View File

@ -9,7 +9,6 @@ import (
"testing" "testing"
"time" "time"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
@ -267,76 +266,3 @@ func testContext(t *testing.T) context.Context {
return ctx return ctx
} }
func resourceID(group string, version string, kind string, name string) *pbresource.ID {
return &pbresource.ID{
Type: &pbresource.Type{
Group: group,
GroupVersion: version,
Kind: kind,
},
Tenancy: &pbresource.Tenancy{
Partition: "default",
Namespace: "default",
PeerName: "local",
},
Name: name,
}
}
func TestMapOwnerFiltered(t *testing.T) {
mapper := controller.MapOwnerFiltered(&pbresource.Type{
Group: "foo",
GroupVersion: "v1",
Kind: "bar",
})
type testCase struct {
owner *pbresource.ID
matches bool
}
cases := map[string]testCase{
"nil-owner": {
owner: nil,
matches: false,
},
"group-mismatch": {
owner: resourceID("other", "v1", "bar", "irrelevant"),
matches: false,
},
"group-version-mismatch": {
owner: resourceID("foo", "v2", "bar", "irrelevant"),
matches: false,
},
"kind-mismatch": {
owner: resourceID("foo", "v1", "baz", "irrelevant"),
matches: false,
},
"match": {
owner: resourceID("foo", "v1", "bar", "irrelevant"),
matches: true,
},
}
for name, tcase := range cases {
t.Run(name, func(t *testing.T) {
// the runtime is not used by the mapper so its fine to pass an empty struct
req, err := mapper(context.Background(), controller.Runtime{}, &pbresource.Resource{
Id: resourceID("foo", "v1", "other", "x"),
Owner: tcase.owner,
})
// The mapper has no error paths at present
require.NoError(t, err)
if tcase.matches {
require.NotNil(t, req)
require.Len(t, req, 1)
prototest.AssertDeepEqual(t, req[0].ID, tcase.owner, cmpopts.EquateEmpty())
} else {
require.Nil(t, req)
}
})
}
}

View File

@ -0,0 +1,58 @@
package controller
import (
"context"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// DependencyMapper is called when a dependency watched via WithWatch is changed
// to determine which of the controller's managed resources need to be reconciled.
type DependencyMapper func(
ctx context.Context,
rt Runtime,
res *pbresource.Resource,
) ([]Request, error)
// MapOwner implements a DependencyMapper that returns the updated resource's owner.
func MapOwner(_ context.Context, _ Runtime, res *pbresource.Resource) ([]Request, error) {
var reqs []Request
if res.Owner != nil {
reqs = append(reqs, Request{ID: res.Owner})
}
return reqs, nil
}
// MapOwnerFiltered creates a DependencyMapper that returns owner IDs as Requests
// if the type of the owner ID matches the given filter type.
func MapOwnerFiltered(filter *pbresource.Type) DependencyMapper {
return func(_ context.Context, _ Runtime, res *pbresource.Resource) ([]Request, error) {
if res.Owner == nil {
return nil, nil
}
if !resource.EqualType(res.Owner.GetType(), filter) {
return nil, nil
}
return []Request{{ID: res.Owner}}, nil
}
}
// ReplaceType creates a DependencyMapper that returns request IDs with the same
// name and tenancy as the original resource but with the type replaced with
// the type specified as this function's parameter.
func ReplaceType(desiredType *pbresource.Type) DependencyMapper {
return func(_ context.Context, _ Runtime, res *pbresource.Resource) ([]Request, error) {
return []Request{
{
ID: &pbresource.ID{
Type: desiredType,
Tenancy: res.Id.Tenancy,
Name: res.Id.Name,
},
},
}, nil
}
}
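A hedged sketch of how these mappers plug into a controller's watches (the builder methods and the catalog types are assumed from the rest of this change; workloadMapper and reconciler are placeholders):

// Sketch: map Service and Workload events onto ServiceEndpoints requests.
ctrl := controller.ForType(types.ServiceEndpointsType).
	// A changed Service reconciles the endpoints resource of the same name.
	WithWatch(types.ServiceType, controller.ReplaceType(types.ServiceEndpointsType)).
	// A changed Workload reconciles every endpoints resource that selects it.
	WithWatch(types.WorkloadType, workloadMapper.MapWorkload).
	WithReconciler(reconciler)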

View File

@ -0,0 +1,137 @@
package controller
import (
"context"
"testing"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/stretchr/testify/require"
)
func resourceID(group string, version string, kind string, name string) *pbresource.ID {
return &pbresource.ID{
Type: &pbresource.Type{
Group: group,
GroupVersion: version,
Kind: kind,
},
Tenancy: &pbresource.Tenancy{
Partition: "default",
Namespace: "default",
PeerName: "local",
},
Name: name,
}
}
func TestMapOwner(t *testing.T) {
owner := resourceID("foo", "v99", "bar", "object")
res := &pbresource.Resource{
Id: resourceID("something", "v1", "else", "x"),
Owner: owner,
}
reqs, err := MapOwner(context.Background(), Runtime{}, res)
require.NoError(t, err)
require.Len(t, reqs, 1)
prototest.AssertDeepEqual(t, owner, reqs[0].ID)
}
func TestMapOwnerFiltered(t *testing.T) {
mapper := MapOwnerFiltered(&pbresource.Type{
Group: "foo",
GroupVersion: "v1",
Kind: "bar",
})
type testCase struct {
owner *pbresource.ID
matches bool
}
cases := map[string]testCase{
"nil-owner": {
owner: nil,
matches: false,
},
"group-mismatch": {
owner: resourceID("other", "v1", "bar", "irrelevant"),
matches: false,
},
"group-version-mismatch": {
owner: resourceID("foo", "v2", "bar", "irrelevant"),
matches: false,
},
"kind-mismatch": {
owner: resourceID("foo", "v1", "baz", "irrelevant"),
matches: false,
},
"match": {
owner: resourceID("foo", "v1", "bar", "irrelevant"),
matches: true,
},
}
for name, tcase := range cases {
t.Run(name, func(t *testing.T) {
// the runtime is not used by the mapper so it's fine to pass an empty struct
req, err := mapper(context.Background(), Runtime{}, &pbresource.Resource{
Id: resourceID("foo", "v1", "other", "x"),
Owner: tcase.owner,
})
// The mapper has no error paths at present
require.NoError(t, err)
if tcase.matches {
require.NotNil(t, req)
require.Len(t, req, 1)
prototest.AssertDeepEqual(t, req[0].ID, tcase.owner, cmpopts.EquateEmpty())
} else {
require.Nil(t, req)
}
})
}
}
func TestReplaceType(t *testing.T) {
rtype := &pbresource.Type{
Group: "foo",
GroupVersion: "v1",
Kind: "bar",
}
tenant := &pbresource.Tenancy{
Partition: "not",
Namespace: "using",
PeerName: "the-defaults",
}
in := &pbresource.Resource{
Id: &pbresource.ID{
Type: &pbresource.Type{
Group: "other",
GroupVersion: "v2",
Kind: "baz",
},
Tenancy: tenant,
Name: "arr-matey",
},
}
mapper := ReplaceType(rtype)
reqs, err := mapper(nil, Runtime{}, in)
require.NoError(t, err)
require.Len(t, reqs, 1)
expected := &pbresource.ID{
Type: rtype,
Tenancy: tenant,
Name: "arr-matey",
}
prototest.AssertDeepEqual(t, expected, reqs[0].ID)
}

4
internal/radix/doc.go Normal file
View File

@ -0,0 +1,4 @@
// This package's contents were originally copied from github.com/armon/go-radix.
// After the initial copy all the data structures were made to use Go 1.18 generics
// instead of relying on the use of interface{} or the any type.
package radix
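A short usage sketch of the generic tree (keys and values here are arbitrary examples):

// Sketch: basic operations on the generic radix tree.
t := radix.New[int]()
t.Insert("api-1", 1)
t.Insert("api-2", 2)
t.Insert("web-1", 3)

if v, ok := t.Get("api-1"); ok {
	_ = v // 1
}

// Visit every entry stored under a prefix ("api-1" and "api-2").
t.WalkPrefix("api-", func(key string, v int) bool {
	return false // returning true would stop the walk
})

// Visit every stored entry that is itself a prefix of the path ("api-1").
t.WalkPath("api-123", func(key string, v int) bool {
	return false
})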

568
internal/radix/radix.go Normal file
View File

@ -0,0 +1,568 @@
package radix
import (
"sort"
"strings"
)
// WalkFn is used when walking the tree. Takes a
// key and value, returning if iteration should
// be terminated.
type WalkFn[T any] func(s string, v T) bool
// leafNode is used to represent a value
type leafNode[T any] struct {
key string
val T
}
// edge is used to represent an edge node
type edge[T any] struct {
label byte
node *node[T]
}
type node[T any] struct {
// leaf is used to store possible leaf
leaf *leafNode[T]
// prefix is the common prefix we ignore
prefix string
// Edges should be stored in-order for iteration.
// We avoid a fully materialized slice to save memory,
// since in most cases we expect to be sparse
edges edges[T]
}
func (n *node[T]) isLeaf() bool {
return n.leaf != nil
}
func (n *node[T]) addEdge(e edge[T]) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= e.label
})
n.edges = append(n.edges, edge[T]{})
copy(n.edges[idx+1:], n.edges[idx:])
n.edges[idx] = e
}
func (n *node[T]) updateEdge(label byte, node *node[T]) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == label {
n.edges[idx].node = node
return
}
panic("replacing missing edge")
}
func (n *node[T]) getEdge(label byte) *node[T] {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == label {
return n.edges[idx].node
}
return nil
}
func (n *node[T]) delEdge(label byte) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == label {
copy(n.edges[idx:], n.edges[idx+1:])
n.edges[len(n.edges)-1] = edge[T]{}
n.edges = n.edges[:len(n.edges)-1]
}
}
type edges[T any] []edge[T]
func (e edges[T]) Len() int {
return len(e)
}
func (e edges[T]) Less(i, j int) bool {
return e[i].label < e[j].label
}
func (e edges[T]) Swap(i, j int) {
e[i], e[j] = e[j], e[i]
}
func (e edges[T]) Sort() {
sort.Sort(e)
}
// Tree implements a radix tree. This can be treated as a
// Dictionary abstract data type. The main advantage over
// a standard hash map is prefix-based lookups and
// ordered iteration.
type Tree[T any] struct {
root *node[T]
size int
}
// New returns an empty Tree
func New[T any]() *Tree[T] {
return NewFromMap[T](nil)
}
// NewFromMap returns a new tree containing the keys
// from an existing map
func NewFromMap[T any](m map[string]T) *Tree[T] {
t := &Tree[T]{root: &node[T]{}}
for k, v := range m {
t.Insert(k, v)
}
return t
}
// Len is used to return the number of elements in the tree
func (t *Tree[T]) Len() int {
return t.size
}
// longestPrefix finds the length of the shared prefix
// of two strings
func longestPrefix(k1, k2 string) int {
max := len(k1)
if l := len(k2); l < max {
max = l
}
var i int
for i = 0; i < max; i++ {
if k1[i] != k2[i] {
break
}
}
return i
}
// Insert is used to add a new entry or update
// an existing entry. Returns true if an existing record is updated.
func (t *Tree[T]) Insert(s string, v T) (T, bool) {
var zeroVal T
var parent *node[T]
n := t.root
search := s
for {
// Handle key exhaustion
if len(search) == 0 {
if n.isLeaf() {
old := n.leaf.val
n.leaf.val = v
return old, true
}
n.leaf = &leafNode[T]{
key: s,
val: v,
}
t.size++
return zeroVal, false
}
// Look for the edge
parent = n
n = n.getEdge(search[0])
// No edge, create one
if n == nil {
e := edge[T]{
label: search[0],
node: &node[T]{
leaf: &leafNode[T]{
key: s,
val: v,
},
prefix: search,
},
}
parent.addEdge(e)
t.size++
return zeroVal, false
}
// Determine longest prefix of the search key on match
commonPrefix := longestPrefix(search, n.prefix)
if commonPrefix == len(n.prefix) {
search = search[commonPrefix:]
continue
}
// Split the node
t.size++
child := &node[T]{
prefix: search[:commonPrefix],
}
parent.updateEdge(search[0], child)
// Restore the existing node
child.addEdge(edge[T]{
label: n.prefix[commonPrefix],
node: n,
})
n.prefix = n.prefix[commonPrefix:]
// Create a new leaf node
leaf := &leafNode[T]{
key: s,
val: v,
}
// If the new key is a subset, add to this node
search = search[commonPrefix:]
if len(search) == 0 {
child.leaf = leaf
return zeroVal, false
}
// Create a new edge for the node
child.addEdge(edge[T]{
label: search[0],
node: &node[T]{
leaf: leaf,
prefix: search,
},
})
return zeroVal, false
}
}
// Delete is used to delete a key, returning the previous
// value and if it was deleted
func (t *Tree[T]) Delete(s string) (T, bool) {
var zeroVal T
var parent *node[T]
var label byte
n := t.root
search := s
for {
// Check for key exhaustion
if len(search) == 0 {
if !n.isLeaf() {
break
}
goto DELETE
}
// Look for an edge
parent = n
label = search[0]
n = n.getEdge(label)
if n == nil {
break
}
// Consume the search prefix
if strings.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
return zeroVal, false
DELETE:
// Delete the leaf
leaf := n.leaf
n.leaf = nil
t.size--
// Check if we should delete this node from the parent
if parent != nil && len(n.edges) == 0 {
parent.delEdge(label)
}
// Check if we should merge this node
if n != t.root && len(n.edges) == 1 {
n.mergeChild()
}
// Check if we should merge the parent's other child
if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
parent.mergeChild()
}
return leaf.val, true
}
// DeletePrefix is used to delete the subtree under a prefix
// Returns how many nodes were deleted
// Use this to delete large subtrees efficiently
func (t *Tree[T]) DeletePrefix(s string) int {
return t.deletePrefix(nil, t.root, s)
}
// deletePrefix does a recursive deletion
func (t *Tree[T]) deletePrefix(parent, n *node[T], prefix string) int {
// Check for key exhaustion
if len(prefix) == 0 {
// Remove the leaf node
subTreeSize := 0
// recursively walk from all edges of the node to be deleted
recursiveWalk(n, func(s string, v T) bool {
subTreeSize++
return false
})
if n.isLeaf() {
n.leaf = nil
}
n.edges = nil // deletes the entire subtree
// Check if we should merge the parent's other child
if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
parent.mergeChild()
}
t.size -= subTreeSize
return subTreeSize
}
// Look for an edge
label := prefix[0]
child := n.getEdge(label)
if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) {
return 0
}
// Consume the search prefix
if len(child.prefix) > len(prefix) {
prefix = prefix[len(prefix):]
} else {
prefix = prefix[len(child.prefix):]
}
return t.deletePrefix(n, child, prefix)
}
func (n *node[T]) mergeChild() {
e := n.edges[0]
child := e.node
n.prefix = n.prefix + child.prefix
n.leaf = child.leaf
n.edges = child.edges
}
// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Tree[T]) Get(s string) (T, bool) {
var zeroVal T
n := t.root
search := s
for {
// Check for key exhaustion
if len(search) == 0 {
if n.isLeaf() {
return n.leaf.val, true
}
break
}
// Look for an edge
n = n.getEdge(search[0])
if n == nil {
break
}
// Consume the search prefix
if strings.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
return zeroVal, false
}
// LongestPrefix is like Get, but instead of an
// exact match, it will return the longest prefix match.
func (t *Tree[T]) LongestPrefix(s string) (string, T, bool) {
var zeroVal T
var last *leafNode[T]
n := t.root
search := s
for {
// Look for a leaf node
if n.isLeaf() {
last = n.leaf
}
// Check for key exhaustion
if len(search) == 0 {
break
}
// Look for an edge
n = n.getEdge(search[0])
if n == nil {
break
}
// Consume the search prefix
if strings.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
if last != nil {
return last.key, last.val, true
}
return "", zeroVal, false
}
// Minimum is used to return the minimum value in the tree
func (t *Tree[T]) Minimum() (string, T, bool) {
var zeroVal T
n := t.root
for {
if n.isLeaf() {
return n.leaf.key, n.leaf.val, true
}
if len(n.edges) > 0 {
n = n.edges[0].node
} else {
break
}
}
return "", zeroVal, false
}
// Maximum is used to return the maximum value in the tree
func (t *Tree[T]) Maximum() (string, T, bool) {
var zeroVal T
n := t.root
for {
if num := len(n.edges); num > 0 {
n = n.edges[num-1].node
continue
}
if n.isLeaf() {
return n.leaf.key, n.leaf.val, true
}
break
}
return "", zeroVal, false
}
// Walk is used to walk the tree
func (t *Tree[T]) Walk(fn WalkFn[T]) {
recursiveWalk(t.root, fn)
}
// WalkPrefix is used to walk the tree under a prefix
func (t *Tree[T]) WalkPrefix(prefix string, fn WalkFn[T]) {
n := t.root
search := prefix
for {
// Check for key exhaustion
if len(search) == 0 {
recursiveWalk(n, fn)
return
}
// Look for an edge
n = n.getEdge(search[0])
if n == nil {
return
}
// Consume the search prefix
if strings.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
continue
}
if strings.HasPrefix(n.prefix, search) {
// Child may be under our search prefix
recursiveWalk(n, fn)
}
return
}
}
// WalkPath is used to walk the tree, but only visiting nodes
// from the root down to a given leaf. Where WalkPrefix walks
// all the entries *under* the given prefix, this walks the
// entries *above* the given prefix.
func (t *Tree[T]) WalkPath(path string, fn WalkFn[T]) {
n := t.root
search := path
for {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return
}
// Check for key exhaustion
if len(search) == 0 {
return
}
// Look for an edge
n = n.getEdge(search[0])
if n == nil {
return
}
// Consume the search prefix
if strings.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
}
// recursiveWalk is used to do a pre-order walk of a node
// recursively. Returns true if the walk should be aborted
func recursiveWalk[T any](n *node[T], fn WalkFn[T]) bool {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return true
}
// Recurse on the children
i := 0
k := len(n.edges) // keeps track of number of edges in previous iteration
for i < k {
e := n.edges[i]
if recursiveWalk(e.node, fn) {
return true
}
// It is a possibility that the WalkFn modified the node we are
// iterating on. If there are no more edges, mergeChild happened,
// so the last edge became the current node n, on which we'll
// iterate one last time.
if len(n.edges) == 0 {
return recursiveWalk(n, fn)
}
// If there are now fewer edges than in the previous iteration,
// then do not increment the loop index, since the current index
// points to a new edge. Otherwise, get to the next index.
if len(n.edges) >= k {
i++
}
k = len(n.edges)
}
return false
}
// ToMap is used to walk the tree and convert it into a map
func (t *Tree[T]) ToMap() map[string]T {
out := make(map[string]T, t.size)
t.Walk(func(k string, v T) bool {
out[k] = v
return false
})
return out
}

View File

@ -0,0 +1,408 @@
package radix
import (
crand "crypto/rand"
"fmt"
"reflect"
"sort"
"strconv"
"testing"
)
func TestRadix(t *testing.T) {
var min, max string
inp := make(map[string]interface{})
for i := 0; i < 1000; i++ {
gen := generateUUID()
inp[gen] = i
if gen < min || i == 0 {
min = gen
}
if gen > max || i == 0 {
max = gen
}
}
r := NewFromMap(inp)
if r.Len() != len(inp) {
t.Fatalf("bad length: %v %v", r.Len(), len(inp))
}
r.Walk(func(k string, v interface{}) bool {
println(k)
return false
})
for k, v := range inp {
out, ok := r.Get(k)
if !ok {
t.Fatalf("missing key: %v", k)
}
if out != v {
t.Fatalf("value mis-match: %v %v", out, v)
}
}
// Check min and max
outMin, _, _ := r.Minimum()
if outMin != min {
t.Fatalf("bad minimum: %v %v", outMin, min)
}
outMax, _, _ := r.Maximum()
if outMax != max {
t.Fatalf("bad maximum: %v %v", outMax, max)
}
for k, v := range inp {
out, ok := r.Delete(k)
if !ok {
t.Fatalf("missing key: %v", k)
}
if out != v {
t.Fatalf("value mis-match: %v %v", out, v)
}
}
if r.Len() != 0 {
t.Fatalf("bad length: %v", r.Len())
}
}
func TestRoot(t *testing.T) {
r := New[bool]()
_, ok := r.Delete("")
if ok {
t.Fatalf("bad")
}
_, ok = r.Insert("", true)
if ok {
t.Fatalf("bad")
}
val, ok := r.Get("")
if !ok || val != true {
t.Fatalf("bad: %v", val)
}
val, ok = r.Delete("")
if !ok || val != true {
t.Fatalf("bad: %v", val)
}
}
func TestDelete(t *testing.T) {
r := New[bool]()
s := []string{"", "A", "AB"}
for _, ss := range s {
r.Insert(ss, true)
}
for _, ss := range s {
_, ok := r.Delete(ss)
if !ok {
t.Fatalf("bad %q", ss)
}
}
}
func TestDeletePrefix(t *testing.T) {
type exp struct {
inp []string
prefix string
out []string
numDeleted int
}
cases := []exp{
{[]string{"", "A", "AB", "ABC", "R", "S"}, "A", []string{"", "R", "S"}, 3},
{[]string{"", "A", "AB", "ABC", "R", "S"}, "ABC", []string{"", "A", "AB", "R", "S"}, 1},
{[]string{"", "A", "AB", "ABC", "R", "S"}, "", []string{}, 6},
{[]string{"", "A", "AB", "ABC", "R", "S"}, "S", []string{"", "A", "AB", "ABC", "R"}, 1},
{[]string{"", "A", "AB", "ABC", "R", "S"}, "SS", []string{"", "A", "AB", "ABC", "R", "S"}, 0},
}
for _, test := range cases {
r := New[bool]()
for _, ss := range test.inp {
r.Insert(ss, true)
}
deleted := r.DeletePrefix(test.prefix)
if deleted != test.numDeleted {
t.Fatalf("Bad delete, expected %v to be deleted but got %v", test.numDeleted, deleted)
}
out := []string{}
fn := func(s string, v bool) bool {
out = append(out, s)
return false
}
r.Walk(fn)
if !reflect.DeepEqual(out, test.out) {
t.Fatalf("mis-match: %v %v", out, test.out)
}
}
}
func TestLongestPrefix(t *testing.T) {
r := New[interface{}]()
keys := []string{
"",
"foo",
"foobar",
"foobarbaz",
"foobarbazzip",
"foozip",
}
for _, k := range keys {
r.Insert(k, nil)
}
if r.Len() != len(keys) {
t.Fatalf("bad len: %v %v", r.Len(), len(keys))
}
type exp struct {
inp string
out string
}
cases := []exp{
{"a", ""},
{"abc", ""},
{"fo", ""},
{"foo", "foo"},
{"foob", "foo"},
{"foobar", "foobar"},
{"foobarba", "foobar"},
{"foobarbaz", "foobarbaz"},
{"foobarbazzi", "foobarbaz"},
{"foobarbazzip", "foobarbazzip"},
{"foozi", "foo"},
{"foozip", "foozip"},
{"foozipzap", "foozip"},
}
for _, test := range cases {
m, _, ok := r.LongestPrefix(test.inp)
if !ok {
t.Fatalf("no match: %v", test)
}
if m != test.out {
t.Fatalf("mis-match: %v %v", m, test)
}
}
}
func TestWalkPrefix(t *testing.T) {
r := New[interface{}]()
keys := []string{
"foobar",
"foo/bar/baz",
"foo/baz/bar",
"foo/zip/zap",
"zipzap",
}
for _, k := range keys {
r.Insert(k, nil)
}
if r.Len() != len(keys) {
t.Fatalf("bad len: %v %v", r.Len(), len(keys))
}
type exp struct {
inp string
out []string
}
cases := []exp{
{
"f",
[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
},
{
"foo",
[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
},
{
"foob",
[]string{"foobar"},
},
{
"foo/",
[]string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
},
{
"foo/b",
[]string{"foo/bar/baz", "foo/baz/bar"},
},
{
"foo/ba",
[]string{"foo/bar/baz", "foo/baz/bar"},
},
{
"foo/bar",
[]string{"foo/bar/baz"},
},
{
"foo/bar/baz",
[]string{"foo/bar/baz"},
},
{
"foo/bar/bazoo",
[]string{},
},
{
"z",
[]string{"zipzap"},
},
}
for _, test := range cases {
out := []string{}
fn := func(s string, v interface{}) bool {
out = append(out, s)
return false
}
r.WalkPrefix(test.inp, fn)
sort.Strings(out)
sort.Strings(test.out)
if !reflect.DeepEqual(out, test.out) {
t.Fatalf("mis-match: %v %v", out, test.out)
}
}
}
func TestWalkPath(t *testing.T) {
r := New[interface{}]()
keys := []string{
"foo",
"foo/bar",
"foo/bar/baz",
"foo/baz/bar",
"foo/zip/zap",
"zipzap",
}
for _, k := range keys {
r.Insert(k, nil)
}
if r.Len() != len(keys) {
t.Fatalf("bad len: %v %v", r.Len(), len(keys))
}
type exp struct {
inp string
out []string
}
cases := []exp{
{
"f",
[]string{},
},
{
"foo",
[]string{"foo"},
},
{
"foo/",
[]string{"foo"},
},
{
"foo/ba",
[]string{"foo"},
},
{
"foo/bar",
[]string{"foo", "foo/bar"},
},
{
"foo/bar/baz",
[]string{"foo", "foo/bar", "foo/bar/baz"},
},
{
"foo/bar/bazoo",
[]string{"foo", "foo/bar", "foo/bar/baz"},
},
{
"z",
[]string{},
},
}
for _, test := range cases {
out := []string{}
fn := func(s string, v interface{}) bool {
out = append(out, s)
return false
}
r.WalkPath(test.inp, fn)
sort.Strings(out)
sort.Strings(test.out)
if !reflect.DeepEqual(out, test.out) {
t.Fatalf("mis-match: %v %v", out, test.out)
}
}
}
func TestWalkDelete(t *testing.T) {
r := New[interface{}]()
r.Insert("init0/0", nil)
r.Insert("init0/1", nil)
r.Insert("init0/2", nil)
r.Insert("init0/3", nil)
r.Insert("init1/0", nil)
r.Insert("init1/1", nil)
r.Insert("init1/2", nil)
r.Insert("init1/3", nil)
r.Insert("init2", nil)
deleteFn := func(s string, v interface{}) bool {
r.Delete(s)
return false
}
r.WalkPrefix("init1", deleteFn)
for _, s := range []string{"init0/0", "init0/1", "init0/2", "init0/3", "init2"} {
if _, ok := r.Get(s); !ok {
t.Fatalf("expecting to still find %q", s)
}
}
if n := r.Len(); n != 5 {
t.Fatalf("expected to find exactly 5 nodes, instead found %d: %v", n, r.ToMap())
}
r.Walk(deleteFn)
if n := r.Len(); n != 0 {
t.Fatalf("expected to find exactly 0 nodes, instead found %d: %v", n, r.ToMap())
}
}
// generateUUID is used to generate a random UUID
func generateUUID() string {
buf := make([]byte, 16)
if _, err := crand.Read(buf); err != nil {
panic(fmt.Errorf("failed to read random bytes: %v", err))
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
buf[0:4],
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16])
}
func BenchmarkInsert(b *testing.B) {
r := New[bool]()
for i := 0; i < 10000; i++ {
r.Insert(fmt.Sprintf("init%d", i), true)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, updated := r.Insert(strconv.Itoa(n), true)
if updated {
b.Fatal("bad")
}
}
}
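// A minimal usage sketch of the generic API exercised by the tests above. The
// function name is illustrative only and not part of the package.
func exampleUsage() {
	r := New[int]()
	r.Insert("foo", 1)
	r.Insert("foobar", 2)
	r.Insert("foozip", 3)

	// Exact lookup.
	if v, ok := r.Get("foobar"); ok {
		fmt.Println("foobar =", v) // foobar = 2
	}

	// Longest-prefix match falls back to the closest inserted key.
	if k, v, ok := r.LongestPrefix("foobarbaz"); ok {
		fmt.Println(k, v) // foobar 2
	}

	// Walk every key under a prefix; returning true from the callback stops the walk.
	r.WalkPrefix("foo", func(k string, v int) bool {
		fmt.Println(k, v)
		return false
	})
}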

View File

@ -92,12 +92,12 @@ func (err ErrInvalidMapKey) Unwrap() error {
 	return err.Wrapped
 }
-type ErrOwnerInvalid struct {
+type ErrOwnerTypeInvalid struct {
 	ResourceType *pbresource.Type
 	OwnerType    *pbresource.Type
 }
-func (err ErrOwnerInvalid) Error() string {
+func (err ErrOwnerTypeInvalid) Error() string {
 	return fmt.Sprintf(
 		"resources of type %s cannot be owned by resources with type %s",
 		ToGVK(err.ResourceType),
@ -105,6 +105,20 @@ func (err ErrOwnerInvalid) Error() string {
 	)
 }
+
+type ErrOwnerTenantInvalid struct {
+	ResourceType    *pbresource.Type
+	ResourceTenancy *pbresource.Tenancy
+	OwnerTenancy    *pbresource.Tenancy
+}
+
+func (err ErrOwnerTenantInvalid) Error() string {
+	return fmt.Sprintf(
+		"resource in partition %s, namespace %s and peer %s cannot be owned by a resource in partition %s, namespace %s and peer %s",
+		err.ResourceTenancy.Partition, err.ResourceTenancy.Namespace, err.ResourceTenancy.PeerName,
+		err.OwnerTenancy.Partition, err.OwnerTenancy.Namespace, err.OwnerTenancy.PeerName,
+	)
+}
 type ErrInvalidReferenceType struct {
 	AllowedType *pbresource.Type
 }
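// A minimal sketch of how the new ErrOwnerTenantInvalid might be surfaced by an
// owner tenancy check; validateOwnerTenancy is an illustrative helper, not part
// of this change.
func validateOwnerTenancy(res, owner *pbresource.Resource) error {
	rt, ot := res.Id.Tenancy, owner.Id.Tenancy
	if rt.Partition != ot.Partition || rt.Namespace != ot.Namespace || rt.PeerName != ot.PeerName {
		return ErrOwnerTenantInvalid{
			ResourceType:    res.Id.Type,
			ResourceTenancy: rt,
			OwnerTenancy:    ot,
		}
	}
	return nil
}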

View File

@ -63,7 +63,7 @@ func TestErrorStrings(t *testing.T) {
 			Key:     "http",
 			Wrapped: fakeWrappedErr,
 		},
-		"ErrOwnerInvalid": ErrOwnerInvalid{
+		"ErrOwnerInvalid": ErrOwnerTypeInvalid{
 			ResourceType: &pbresource.Type{Group: "foo", GroupVersion: "v1", Kind: "bar"},
 			OwnerType:    &pbresource.Type{Group: "other", GroupVersion: "v2", Kind: "something"},
 		},

View File

@ -2,7 +2,6 @@ package resourcetest
 import (
 	"context"
-	"testing"
 
 	"github.com/hashicorp/consul/proto-public/pbresource"
 	"github.com/oklog/ulid/v2"
@ -38,13 +37,24 @@ func Resource(rtype *pbresource.Type, name string) *resourceBuilder {
 	}
 }
-func (b *resourceBuilder) WithData(t *testing.T, data protoreflect.ProtoMessage) *resourceBuilder {
+func (b *resourceBuilder) WithData(t T, data protoreflect.ProtoMessage) *resourceBuilder {
+	t.Helper()
+
 	anyData, err := anypb.New(data)
 	require.NoError(t, err)
 	b.resource.Data = anyData
 	return b
 }
+
+func (b *resourceBuilder) WithMeta(key string, value string) *resourceBuilder {
+	if b.resource.Metadata == nil {
+		b.resource.Metadata = make(map[string]string)
+	}
+
+	b.resource.Metadata[key] = value
+	return b
+}
 func (b *resourceBuilder) WithOwner(id *pbresource.ID) *resourceBuilder {
 	b.resource.Owner = id
 	return b
@ -91,7 +101,13 @@ func (b *resourceBuilder) Build() *pbresource.Resource {
 	return res
 }
-func (b *resourceBuilder) Write(t *testing.T, client pbresource.ResourceServiceClient) *pbresource.Resource {
+func (b *resourceBuilder) ID() *pbresource.ID {
+	return b.resource.Id
+}
+
+func (b *resourceBuilder) Write(t T, client pbresource.ResourceServiceClient) *pbresource.Resource {
+	t.Helper()
+
 	res := b.resource
 	rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{
@ -101,7 +117,9 @@ func (b *resourceBuilder) Write(t *testing.T, client pbresource.ResourceServiceC
 	require.NoError(t, err)
 	if !b.dontCleanup {
-		t.Cleanup(func() {
+		cleaner, ok := t.(CleanupT)
+		require.True(t, ok, "T does not implement a Cleanup method and cannot be used with automatic resource cleanup")
+		cleaner.Cleanup(func() {
 			_, err := client.Delete(context.Background(), &pbresource.DeleteRequest{
 				Id: rsp.Resource.Id,
 			})
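// A minimal sketch of the builder flow after this change: because the passed-in
// value also implements CleanupT, Write registers a cleanup that deletes the
// resource again at the end of the test. The resource name and metadata key are
// illustrative assumptions.
func exampleBuildAndWrite(t CleanupT, client pbresource.ResourceServiceClient, rtype *pbresource.Type, data protoreflect.ProtoMessage) *pbresource.Resource {
	return Resource(rtype, "api-1").
		WithData(t, data).
		WithMeta("generated-by", "example").
		Write(t, client)
}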

View File

@ -0,0 +1,218 @@
package resourcetest
import (
"context"
"math/rand"
"time"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type Client struct {
pbresource.ResourceServiceClient
timeout time.Duration
wait time.Duration
}
func NewClient(client pbresource.ResourceServiceClient) *Client {
return &Client{
ResourceServiceClient: client,
timeout: 7 * time.Second,
wait: 25 * time.Millisecond,
}
}
func (client *Client) SetRetryerConfig(timeout time.Duration, wait time.Duration) {
client.timeout = timeout
client.wait = wait
}
func (client *Client) retry(t T, fn func(r *retry.R)) {
retryer := &retry.Timer{Timeout: client.timeout, Wait: client.wait}
retry.RunWith(retryer, t, fn)
}
func (client *Client) PublishResources(t T, resources []*pbresource.Resource) {
// Randomize the order of insertion. Generally insertion order shouldn't matter as the
// controllers should eventually converge on the desired state. The exception to this
// is that you cannot insert resources with owner refs before the resource they are
// owned by or insert a resource into a non-default tenant before that tenant exists.
rand.Shuffle(len(resources), func(i, j int) {
resources[i], resources[j] = resources[j], resources[i]
})
// This slice tracks the resources that have been published so far. When a resource
// with an owner reference is encountered before its owner has been written, we do not
// attempt to write it and instead defer it to the next round of publishing.
var written []*pbresource.ID
for len(resources) > 0 {
var left []*pbresource.Resource
published := 0
for _, res := range resources {
// check that any owner references would be satisfied
if res.Owner != nil {
found := slices.ContainsFunc(written, func(id *pbresource.ID) bool {
return resource.EqualID(res.Owner, id)
})
// if the owner hasn't been published yet then we cannot publish this resource
if !found {
left = append(left, res)
continue
}
}
t.Logf("Writing resource %s with type %s", res.Id.Name, resource.ToGVK(res.Id.Type))
_, err := client.Write(context.Background(), &pbresource.WriteRequest{
Resource: res,
})
require.NoError(t, err)
// track the number of resources published in this round
published += 1
written = append(written, res.Id)
}
// the next round only has this subset of resources to attempt writing
resources = left
// if we didn't publish any resources this round then nothing would
// enable us to do so by iterating again so we break to prevent infinite
// looping.
if published == 0 {
break
}
}
require.Empty(t, resources, "Could not publish all resources - some resources have invalid owner references")
}
func (client *Client) RequireResourceNotFound(t T, id *pbresource.ID) {
t.Helper()
rsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: id})
require.Error(t, err)
require.Equal(t, codes.NotFound, status.Code(err))
require.Nil(t, rsp)
}
func (client *Client) RequireResourceExists(t T, id *pbresource.ID) *pbresource.Resource {
t.Helper()
rsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: id})
require.NoError(t, err, "error reading %s with type %s", id.Name, resource.ToGVK(id.Type))
require.NotNil(t, rsp)
return rsp.Resource
}
func (client *Client) RequireVersionUnchanged(t T, id *pbresource.ID, version string) *pbresource.Resource {
t.Helper()
res := client.RequireResourceExists(t, id)
RequireVersionUnchanged(t, res, version)
return res
}
func (client *Client) RequireVersionChanged(t T, id *pbresource.ID, version string) *pbresource.Resource {
t.Helper()
res := client.RequireResourceExists(t, id)
RequireVersionChanged(t, res, version)
return res
}
func (client *Client) RequireStatusCondition(t T, id *pbresource.ID, statusKey string, condition *pbresource.Condition) *pbresource.Resource {
t.Helper()
res := client.RequireResourceExists(t, id)
RequireStatusCondition(t, res, statusKey, condition)
return res
}
func (client *Client) RequireStatusConditionForCurrentGen(t T, id *pbresource.ID, statusKey string, condition *pbresource.Condition) *pbresource.Resource {
t.Helper()
res := client.RequireResourceExists(t, id)
RequireStatusConditionForCurrentGen(t, res, statusKey, condition)
return res
}
func (client *Client) RequireResourceMeta(t T, id *pbresource.ID, key string, value string) *pbresource.Resource {
t.Helper()
res := client.RequireResourceExists(t, id)
RequireResourceMeta(t, res, key, value)
return res
}
func (client *Client) RequireReconciledCurrentGen(t T, id *pbresource.ID, statusKey string) *pbresource.Resource {
t.Helper()
res := client.RequireResourceExists(t, id)
RequireReconciledCurrentGen(t, res, statusKey)
return res
}
func (client *Client) WaitForReconciliation(t T, id *pbresource.ID, statusKey string) *pbresource.Resource {
t.Helper()
var res *pbresource.Resource
client.retry(t, func(r *retry.R) {
res = client.RequireReconciledCurrentGen(r, id, statusKey)
})
return res
}
func (client *Client) WaitForStatusCondition(t T, id *pbresource.ID, statusKey string, condition *pbresource.Condition) *pbresource.Resource {
t.Helper()
var res *pbresource.Resource
client.retry(t, func(r *retry.R) {
res = client.RequireStatusConditionForCurrentGen(r, id, statusKey, condition)
})
return res
}
func (client *Client) WaitForNewVersion(t T, id *pbresource.ID, version string) *pbresource.Resource {
t.Helper()
var res *pbresource.Resource
client.retry(t, func(r *retry.R) {
res = client.RequireVersionChanged(r, id, version)
})
return res
}
func (client *Client) WaitForResourceState(t T, id *pbresource.ID, verify func(T, *pbresource.Resource)) *pbresource.Resource {
t.Helper()
var res *pbresource.Resource
client.retry(t, func(r *retry.R) {
res = client.RequireResourceExists(r, id)
verify(r, res)
})
return res
}
// ResolveResourceID reads the specified resource and returns its full ID.
// This is mainly useful to get the ID with the Uid filled out.
func (client *Client) ResolveResourceID(t T, id *pbresource.ID) *pbresource.ID {
t.Helper()
return client.RequireResourceExists(t, id).Id
}
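// A minimal sketch of how the retrying helpers above compose in an integration
// test; the status key and metadata values are illustrative assumptions rather
// than values taken from this change.
func exampleWaitForReconcile(t T, client *Client, id *pbresource.ID) {
	// Resolve the ID first so the Uid is populated for later comparisons.
	fullID := client.ResolveResourceID(t, id)

	// Block (up to the configured timeout) until a status has been written for
	// the resource's current generation.
	res := client.WaitForReconciliation(t, fullID, "example.consul.io/controller")

	// Follow-up assertions can then run against the reconciled resource.
	RequireResourceMeta(t, res, "example-key", "example-value")
}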

View File

@ -0,0 +1,40 @@
package resourcetest
import (
"fmt"
"io/fs"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/encoding/protojson"
)
// ParseResourcesFromFilesystem will walk the filesystem at the given path
// and parse all files as protobuf/JSON resources.
func ParseResourcesFromFilesystem(t T, files fs.FS, path string) []*pbresource.Resource {
t.Helper()
var resources []*pbresource.Resource
err := fs.WalkDir(files, path, func(fpath string, dent fs.DirEntry, err error) error {
// propagate walk errors rather than dereferencing a possibly nil entry
if err != nil {
return err
}
if dent.IsDir() {
return nil
}
data, err := fs.ReadFile(files, fpath)
if err != nil {
return err
}
var res pbresource.Resource
err = protojson.Unmarshal(data, &res)
if err != nil {
return fmt.Errorf("error decoding data from %s: %w", fpath, err)
}
resources = append(resources, &res)
return nil
})
require.NoError(t, err)
return resources
}
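// A minimal sketch tying this parser to Client.PublishResources; the embedded
// testdata directory and the "embed" import it requires are assumptions for
// illustration.

//go:embed testdata
var exampleTestData embed.FS

func examplePublishFixtures(t T, client *Client) {
	resources := ParseResourcesFromFilesystem(t, exampleTestData, "testdata")
	client.PublishResources(t, resources)
}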

View File

@ -0,0 +1,62 @@
package resourcetest
import (
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/testing/protocmp"
)
func RequireVersionUnchanged(t T, res *pbresource.Resource, version string) {
t.Helper()
require.Equal(t, version, res.Version)
}
func RequireVersionChanged(t T, res *pbresource.Resource, version string) {
t.Helper()
require.NotEqual(t, version, res.Version)
}
func RequireStatusCondition(t T, res *pbresource.Resource, statusKey string, condition *pbresource.Condition) {
t.Helper()
require.NotNil(t, res.Status)
status, found := res.Status[statusKey]
require.True(t, found)
prototest.AssertContainsElement(t, status.Conditions, condition)
}
func RequireStatusConditionForCurrentGen(t T, res *pbresource.Resource, statusKey string, condition *pbresource.Condition) {
t.Helper()
require.NotNil(t, res.Status)
status, found := res.Status[statusKey]
require.True(t, found)
require.Equal(t, res.Generation, status.ObservedGeneration)
prototest.AssertContainsElement(t, status.Conditions, condition)
}
func RequireResourceMeta(t T, res *pbresource.Resource, key string, value string) {
t.Helper()
require.NotNil(t, res.Metadata)
require.Contains(t, res.Metadata, key)
require.Equal(t, value, res.Metadata[key])
}
func RequireReconciledCurrentGen(t T, res *pbresource.Resource, statusKey string) {
t.Helper()
require.NotNil(t, res.Status)
status, found := res.Status[statusKey]
require.True(t, found)
require.Equal(t, res.Generation, status.ObservedGeneration)
}
func RequireOwner(t T, res *pbresource.Resource, owner *pbresource.ID, ignoreUid bool) {
t.Helper()
var opts []cmp.Option
if ignoreUid {
opts = append(opts, protocmp.IgnoreFields(owner, "uid"))
}
prototest.AssertDeepEqual(t, res.Owner, owner, opts...)
}
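// A minimal sketch of composing these assertions; ignoring the Uid in RequireOwner
// is useful when the expected owner ID was built by hand rather than read back from
// the resource service. The IDs here are illustrative.
func exampleOwnerCheck(t T, client *Client, endpointsID, ownerID *pbresource.ID) {
	res := client.RequireResourceExists(t, endpointsID)
	RequireOwner(t, res, ownerID, true)
}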

View File

@ -0,0 +1,17 @@
package resourcetest
// T represents the subset of testing.T methods that will be used
// by the various functionality in this package
type T interface {
Helper()
Log(args ...interface{})
Logf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
FailNow()
}
type CleanupT interface {
T
Cleanup(func())
}
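// A compile-time sketch (assuming "testing" and the sdk testutil/retry package
// imported in client.go were imported here) of the concrete types expected to
// satisfy these interfaces: *testing.T in regular tests, and *retry.R inside
// Client's retry loops, which is why retry.R gains a Log method in this change.
var (
	_ CleanupT = (*testing.T)(nil)
	_ T        = (*retry.R)(nil)
)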

View File

@ -59,6 +59,10 @@ func (r *R) Logf(format string, args ...interface{}) {
 	r.log(fmt.Sprintf(format, args...))
 }
+
+func (r *R) Log(args ...interface{}) {
+	r.log(fmt.Sprintln(args...))
+}
 func (r *R) Helper() {}
 // runFailed is a sentinel value to indicate that the func itself