added basic instructions for deploying to k8s

trujillo-adam 2021-10-18 15:04:06 -07:00
commit ef968aaf3f
34 changed files with 831 additions and 238 deletions

.changelog/11241.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
telemetry: Consul Clients no longer emit Autopilot metrics.
```

.changelog/11298.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
ui: Fixed styling of Role remove dialog on the Token edit page
```

.changelog/11323.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:bug
acl: **(Enterprise only)** ensure that auth methods with namespace rules work with partitions
```

View File

@ -602,7 +602,7 @@ jobs:
- run:
name: install yarn packages
command: cd ui && make
command: cd ui && make deps
- save_cache:
key: *YARN_CACHE_KEY

View File

@ -216,7 +216,7 @@ func (ac *AutoConfig) generateCSR() (csr string, key string, err error) {
Host: unknownTrustDomain,
Datacenter: ac.config.Datacenter,
Agent: ac.config.NodeName,
// TODO(rb)(partitions): populate the partition field from the agent config
Partition: ac.config.PartitionOrDefault(),
}
caConfig, err := ac.config.ConnectCAConfiguration()

View File

@ -2,7 +2,17 @@
package connect
import "fmt"
import (
"fmt"
"github.com/hashicorp/consul/agent/structs"
)
// GetEnterpriseMeta will synthesize an EnterpriseMeta struct from the SpiffeIDAgent.
// in OSS this just returns an empty (but never nil) struct pointer
func (id SpiffeIDAgent) GetEnterpriseMeta() *structs.EnterpriseMeta {
return &structs.EnterpriseMeta{}
}
func (id SpiffeIDAgent) uriPath() string {
return fmt.Sprintf("/agent/client/dc/%s/id/%s", id.Datacenter, id.Agent)

View File

@ -19,5 +19,5 @@ func computeTargetEnterpriseMeta(
method *structs.ACLAuthMethod,
verifiedIdentity *authmethod.Identity,
) (*structs.EnterpriseMeta, error) {
return method.TargetEnterpriseMeta(verifiedIdentity.EnterpriseMeta), nil
return &structs.EnterpriseMeta{}, nil
}

View File

@ -186,8 +186,7 @@ func (s *ConnectCA) Sign(
"we are %s", serviceID.Datacenter, s.srv.config.Datacenter)
}
} else if isAgent {
// TODO(partitions): support auto-config in different partitions
structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
agentID.GetEnterpriseMeta().FillAuthzContext(&authzContext)
if authz.NodeWrite(agentID.Agent, &authzContext) != acl.Allow {
return acl.ErrPermissionDenied
}

View File

@ -1438,8 +1438,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
csr.URIs = uris
}
// TODO(partitions): support auto-config in different partitions
entMeta.Merge(structs.DefaultEnterpriseMetaInDefaultPartition())
entMeta.Merge(agentID.GetEnterpriseMeta())
}
commonCfg, err := config.GetCommonConfig()

View File

@ -83,7 +83,8 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error)
return d, fmt.Errorf("failed to setup node ID: %w", err)
}
gauges, counters, summaries := getPrometheusDefs(cfg.Telemetry)
isServer := result.RuntimeConfig.ServerMode
gauges, counters, summaries := getPrometheusDefs(cfg.Telemetry, isServer)
cfg.Telemetry.PrometheusOpts.GaugeDefinitions = gauges
cfg.Telemetry.PrometheusOpts.CounterDefinitions = counters
cfg.Telemetry.PrometheusOpts.SummaryDefinitions = summaries
@ -187,7 +188,7 @@ func newConnPool(config *config.RuntimeConfig, logger hclog.Logger, tls *tlsutil
// getPrometheusDefs reaches into every slice of prometheus defs we've defined in each part of the agent, and appends
// all of our slices into one nice slice of definitions per metric type for the Consul agent to pass to go-metrics.
func getPrometheusDefs(cfg lib.TelemetryConfig) ([]prometheus.GaugeDefinition, []prometheus.CounterDefinition, []prometheus.SummaryDefinition) {
func getPrometheusDefs(cfg lib.TelemetryConfig, isServer bool) ([]prometheus.GaugeDefinition, []prometheus.CounterDefinition, []prometheus.SummaryDefinition) {
// TODO: "raft..." metrics come from the raft lib and we should migrate these to a telemetry
// package within. In the mean time, we're going to define a few here because they're key to monitoring Consul.
raftGauges := []prometheus.GaugeDefinition{
@ -204,7 +205,6 @@ func getPrometheusDefs(cfg lib.TelemetryConfig) ([]prometheus.GaugeDefinition, [
// Build slice of slices for all gauge definitions
var gauges = [][]prometheus.GaugeDefinition{
cache.Gauges,
consul.AutopilotGauges,
consul.RPCGauges,
consul.SessionGauges,
grpc.StatsGauges,
@ -216,6 +216,11 @@ func getPrometheusDefs(cfg lib.TelemetryConfig) ([]prometheus.GaugeDefinition, [
raftGauges,
}
// TODO(ffmmm): conditionally add only leader specific metrics to gauges, counters, summaries, etc
if isServer {
gauges = append(gauges, consul.AutopilotGauges)
}
// Flatten definitions
// NOTE(kit): Do we actually want to create a set here so we can ensure definition names are unique?
var gaugeDefs []prometheus.GaugeDefinition

View File

@ -70,10 +70,6 @@ func (p *ACLPolicy) EnterprisePolicyMeta() *acl.EnterprisePolicyMeta {
return nil
}
func (m *ACLAuthMethod) TargetEnterpriseMeta(_ *EnterpriseMeta) *EnterpriseMeta {
return &m.EnterpriseMeta
}
func (t *ACLToken) NodeIdentityList() []*ACLNodeIdentity {
if len(t.NodeIdentities) == 0 {
return nil

View File

@ -15,11 +15,17 @@ dist-docker: dist
clean:
rm -rf ./dist
# Build a distribution of the UI using the minimal amount of dependencies
# Build a distribution of the UI
dist: clean
cd packages/consul-ui && \
$(MAKE)
# Install deps for the UI only
deps: clean
cd packages/consul-ui && \
$(MAKE) deps
# Build a distribution of the UI for Vercel previews.
# The distribution must be copied into the ui/ subfolder
# in order to mirror the go binary

View File

@ -141,24 +141,35 @@ as |modal|>
<li role="none" class="dangerous">
<label for={{confirm}} role="menuitem" tabindex="-1" onkeypress={{keypressClick}} data-test-delete>Remove</label>
<div role="menu">
<div class="confirmation-alert warning">
<div>
<header>
Confirm Remove
</header>
<InformedAction
class="warning"
>
<:header>
Confirm Remove
</:header>
<:body>
<p>
Are you sure you want to remove this role?
</p>
</div>
<ul>
<li class="dangerous">
<button tabindex="-1" type="button" class="type-delete" onclick={{action send 'remove' item items}}>Remove</button>
</li>
<li>
<label for={{confirm}}>Cancel</label>
</li>
</ul>
</div>
</:body>
<:actions as |Actions|>
<Actions.Action class="dangerous">
<Action
tabindex="-1"
{{on 'click' (action send 'remove' item items)}}
>
Remove
</Action>
</Actions.Action>
<Actions.Action>
<Action
@for={{confirm}}
>
Cancel
</Action>
</Actions.Action>
</:actions>
</InformedAction>
</div>
</li>
{{/if}}

View File

@ -6,9 +6,11 @@ export default (clickable, deletable, collection, alias, roleForm) => (scope = '
roles: alias('selectedOptions'),
selectedOptions: collection(
'[data-test-roles] [data-test-tabular-row]',
deletable({
actions: clickable('label'),
})
{
actions: clickable('label > button'),
delete: clickable('[data-test-delete]'),
confirmDelete: clickable('.informed-action button'),
}
),
};
};

View File

@ -54,36 +54,184 @@ Values specified for [`proxy-defaults`](docs/connect/config-entries/proxy-defaul
## Requirements
In client agent configurations, the admin partition name should be specified in the agent configuration:
Your Consul configuration must meet the following requirements to use admin partitions.
```hcl
partition = "<NAME>"
```
The anti-entropy sync will use the configured admin partition name when registering the node.
### Security Configurations
The agent token used by the client agent will need to allow `node:write` in the admin partition.
* The agent token used by the client agent will need to allow `node:write` in the admin partition.
* The `read` permission for `proxy-defaults` requires `admin_partition:read` for the specific partition. The `write` permission for `proxy-defaults` requires `mesh:write`. See [Admin Partition Rules](/docs/security/acl/acl-rules#admin-partition-rules) for additional information.
* The write permissions for ingress and terminating gateways must be `operator:write`.
* Existing intentions must be set to `deny` all traffic from outside the admin partition.
* Wildcards (`*`) are not supported when creating intentions for admin partitions.
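For reference, the following is a minimal sketch of an ACL policy that could back the client agent token described above. The node name is illustrative, and the policy is assumed to be created within the target admin partition:

```hcl
# Hypothetical agent token policy granting node:write.
# Create the policy in the admin partition that the client agent belongs to;
# the node name below is an example only.
node "k8s-client-node-1" {
  policy = "write"
}
```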
The `read` permission for `proxy-defaults` require `admin_partition:read` for the specific partition. The `write` permission for proxy-defaults require `mesh:write`. See [Admin Partition Rules](/docs/security/acl/acl-rules#admin-partition-rules) for additional information
Any queries for the proxy-defaults config entry must include the appropriate `EnterpriseMeta`, which specifies the admin partition.
### Agent Configurations
The write permissions for ingress and terminating gateways must be `operator:write`.
* Any queries for the proxy-defaults config entry must include the appropriate `EnterpriseMeta`, which specifies the admin partition.
* In client agent configurations, the admin partition name should be specified in the agent configuration:
Existing intentions must be set to `deny` all traffic from outside the admin partition.
```hcl
partition = "<NAME>"
```
* The anti-entropy sync will use the configured admin partition name when registering the node.
* Any map keys used to compile the [discovery chain](/docs/connect/l7-traffic/discovery-chain) must include the admin partition name.
Any map keys used to compile the [discovery chain](/docs/connect/l7-traffic/discovery-chain) must include the admin partition name.
Wildcards (`*`) are not supported when creating intentions for admin partitions.
### Kubernetes Requirements
One of the primary use cases for admin partitions is enabling a service mesh on Kubernetes clusters. The following requirements must be met to create admin partitions on Kubernetes:
* Two or more Kubernetes clusters with Consul servers installed on one of them. The other clusters should run Consul clients.
* A Consul Enterprise license must be installed on each instance of Consul.
* The consul-k8s Helm chart v0.34.1 or greater.
* Consul 1.11.0-ent-alpha or greater.
* All instances in the VPC must be able to communicate with each other.
* Pods must be able to communicate with each other (flat pod and node network). See [step 3](#firewall-rules) in the Deploying Consul with Admin Partitions on Kubernetes section for additional information.
## Usage
This section describes how to deploy Consul admin partitions to Kubernetes clusters and points you to the CLI reference for interacting with the admin partitions API on the command line.
### Deploying Consul with Admin Partitions on Kubernetes
The expected use case is to create admin partitions on Kubernetes clusters. Many organizations prefer to use cloud-managed Kubernetes offerings to provision separate Kubernetes clusters for individual teams, business units, or environments, as opposed to deploying a single, large Kubernetes cluster. When these organizations attempt to use a service mesh to enable cross-cluster activities, such as administration tasks and communication between nodes, they encounter problems.
The following procedure will result in different admin partitions in each Kubernetes cluster. The Consul clients running in the cluster with servers will be in the `default` partition. Another partition called `clients` will also be created.
Verify that your Consul deployment meets the [Kubernetes Requirements](#kubernetes-requirements) before proceeding.
1. <a id="firewall-rules"/> Update the firewall rules to ensure the pod network is flat. The following example for Google Kubernetes Engine (GKE) describes how to create a firewall rule that allows all pod and node network traffic to talk to the server and workload nodegroups:
1. Open the **VPC Network > Firewall** section and identify the rules associated with your clusters. The cluster name is a part of the rule.
![IP address ranges for GKE clusters](/img/admin-partitions/consul-admin-partitions-gke-cluster-1.png)
![IP address ranges for GKE clusters](/img/admin-partitions/consul-admin-partitions-gke-cluster-2.png)
The `gke-cluster-1-7b43116f-node` and `gke-cluster-2-48d3bee6-node` labels are the node names for the GKE clusters.
The `10.128.0.0/9` IP range represents the node IP network. The IP addresses of the node VMs in the clusters are within this range.
The `10.44.0.0/14` and `10.4.0.0/14` IP ranges are the pod IP ranges for the GKE clusters.
1. Enter the `gke-cluster-1-7b43116f-node` and `gke-cluster-2-48d3bee6-node` node names in the target fields of the firewall rule.
1. Enter the `10.128.0.0/9`, `10.44.0.0/14`, and `10.4.0.0/14` IP ranges into the source fields. This will ensure that traffic from the nodes and pods in each cluster can reach the nodes and pods in the other.
![Configured GKE cluster firewall rule for Consul admin partitions](/img/admin-partitions/consul-admin-partitions-gke-firewall-rule.png)
1. Create the license secret in each cluster, e.g.:
```shell-session
kubectl create secret generic license --from-literal=key=<LICENSE_KEY>
```
This step must also be completed for each workload cluster.
1. Create a server configuration file to override the default Consul Helm chart settings:
```yaml
global:
  enableConsulNamespaces: true
  tls:
    enabled: true
  image: hashicorp/consul-enterprise:1.11.0-ent-alpha
  adminPartitions:
    enabled: true
server:
  exposeGossipAndRPCPorts: true
  enterpriseLicense:
    secretName: license
    secretKey: key
connectInject:
  enabled: true
  transparentProxy:
    defaultEnabled: false
  consulNamespaces:
    mirroringK8S: true
controller:
  enabled: true
```
Note that the `transparentProxy` configuration is disabled. This is to enable multi-cluster networking.
1. Start the Consul server(s) using the custom configuration file:
```shell-session
helm install server hashicorp/consul -f server.yaml
```
1. After the server starts, get the external IP address for the partition service so that it can be added to the client configuration. The partition service is a `LoadBalancer` type. The IP address is where clients across your partitions will communicate with servers in this cluster.
```shell-session
kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3m
servers-consul-connect-injector-svc ClusterIP 10.97.175.39 <none> 443/TCP 30s
servers-consul-controller-webhook ClusterIP 10.100.22.99 <none> 443/TCP 30s
servers-consul-dns ClusterIP 10.103.43.20 <none> 53/TCP,53/UDP 30s
servers-consul-partition-service LoadBalancer 10.111.255.152 35.192.119.38 8501:30643/TCP,8301:30466/TCP,8300:30657/TCP 30s
servers-consul-server ClusterIP None <none> 8501/TCP,8301/TCP,8301/UDP,8302/TCP,8302/UDP,8300/TCP,8600/TCP,8600/UDP 30s
servers-consul-ui ClusterIP 10.106.240.55 <none> 443/TCP 30s
```
1. Create the workload configuration for client nodes in your cluster. Create a configuration for each admin partition. In the following example, the external IP address from the previous step has been applied:
```yaml
global:
  enabled: false
  enableConsulNamespaces: true
  image: hashicorp/consul-enterprise:1.11.0-ent-alpha
  adminPartitions:
    enabled: true
    name: "clients" # partition name
  tls:
    enabled: true
    caCert:
      secretName: consul-consul-ca-cert
      secretKey: tls.crt
    caKey:
      secretName: consul-consul-ca-key
      secretKey: tls.key
server:
  enterpriseLicense:
    secretName: license
    secretKey: key
externalServers:
  enabled: true
  hosts: "35.192.119.38"
  tlsServerName: server.dc1.consul
client:
  enabled: true
  exposeGossipPorts: true
  join: "35.192.119.38"
connectInject:
  enabled: true
  consulNamespaces:
    mirroringK8S: true
controller:
  enabled: true
```
1. Copy the server certificate to the workload cluster.
```shell-session
kubectl get secret server-consul-ca-cert --context server -o yaml | kubectl apply --context client -f -
```
1. Copy the server key to the workload cluster.
```shell-session
kubectl get secret consul-consul-ca-key --context server -o yaml | kubectl apply --context client -f -
```
1. Start the workload client clusters:
```shell-session
helm install client hashicorp/consul -f client.yaml
```
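For comparison only, the following is a rough sketch of how the same partition assignment would appear in a non-Kubernetes client agent configuration; the partition name and server address simply mirror the Helm values shown earlier and are illustrative:

```hcl
# Hedged sketch of an equivalent agent configuration for a client running outside Kubernetes.
# "clients" and the server IP are the example values used above.
partition  = "clients"
retry_join = ["35.192.119.38"]
```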
### CLI Usage
You can create and manage admin partitions through the CLI. Refer to the [admin partition CLI documentation](/commands/admin-partition) for details.
The expected use case is to create admin partitions on Kubernetes clusters. Refer to the following documentation and tutorial for instructions:
### Admin Partitions on Kubernetes Tutorial
* [Service Mesh](/docs/k8s/connect)
* LINK TO TUTORIAL
Refer to the [LINK TO TUTORIAL]() tutorial for additional information on how to create Consul admin partitions on Kubernetes.
## Known Limitations

View File

@ -6,26 +6,41 @@ description: Guide to partnership integrations for Consul.
# Consul Integration Program
The HashiCorp Consul Integration Program enables vendors to build integrations with HashiCorp Consul that are tested and approved by HashiCorp. The program is intended to be largely self-service with links to resources, code samples, documentation, and clear integration steps.
The HashiCorp Consul Integration Program enables prospective partners to build integrations with HashiCorp Consul that are reviewed and verified by HashiCorp. Consul can be consumed in two ways: **self-managed** or **HashiCorp Cloud Platform (HCP)**, a hosted version of Consul operated by HashiCorp. HCP Consul is secure by default and offers an enterprise-level SLA for deploying an organization's most important applications.
## Types of Consul Integrations
All integrations are available with Consul's self-managed version. In some cases, these integrations can also be validated against HCP Consul. Upon completion of the validation with HCP Consul, a partner will receive an HCP Consul Verified badge, which will be displayed on their partner page and can also be used on the partner's website.
By leveraging Consuls RESTful HTTP API system, vendors are able to build extensible integrations at the data plane, platform, and the infrastructure layer to extend Consuls functionalities. These integrations can be performed with the OSS (open source) version of Consul. Integrations with advanced network segmentation, advanced federation, and advanced read scalability need to be tested against Consul Enterprise, since these features are only supported by Consul Enterprise.
<img src="/img/hcp_consul_partner_badge.png" alt="HCP Consul Badge" style={{display:'block', margin:'auto', width:'192px'}}/>
[![Consul Architecture](/img/consul_ecosystem_diagram.png)](/img/consul_ecosystem_diagram.png)
The program is intended to be largely self-service with links to resources, code samples, documentation, and clear integration steps.
**Data Plane**: These integrations automate IP updates of load balancers by leveraging Consul service discovery, automate firewall security policy updates by leveraging Consul intentions within a centralized management tool, extend sidecar proxies to support Consul connect, and extend API gateways to allow Consul to route incoming traffic to the proxies for Connect-enabled services.
## Categories of Consul Integrations
**Control Plane**: Consul has a client-server architecture and is the control plane for the service mesh. No integrations at this layer.
By leveraging Consul's RESTful HTTP API system, prospective partners are able to build extensible integrations at the data plane, platform, and infrastructure layers to extend Consul's functionality. These integrations can be performed with the OSS (open source) version of Consul, Consul Enterprise, and HCP Consul. For features in Consul Enterprise and HCP Consul, refer to the links below:
**Platform**: These integrations leverage automation of Consul agent deployment, configuration, and management through cloud and PaaS provisioning and orchestration tools such as Kubernetes and Pivotal Cloud Foundry (PCF). They include the Consul agent running in both client and server mode.
- [HCP Consul Features](https://cloud.hashicorp.com/docs/consul/features)
- [Consul Enterprise Features](/docs/enterprise)
**Infrastructure**: These integrations extend Consuls certificate management, secure ACL configuration, observability metrics and logging, and service discovery that allows for dynamic service mapping with ITSM tools.
**The Consul Ecosystem Architecture**
<img src="/img/consul_ecosystem_diagram2.png" alt="Consul Architecture" style={{display:'block', margin:'auto', width:'624px'}}/>
**Data Plane**: These integrations extend Consul's certificate management, secure ACL configuration, observability metrics and logging, and service discovery that allows for dynamic service mapping with APM and logging tools; extend sidecar proxies to support Consul Connect; and extend API gateways to allow Consul to route incoming traffic to the proxies for Connect-enabled services.
**Control Plane**: Consul has a client-server architecture and is the control plane for the service mesh.
**Platform**: These integrations leverage automation of Consul agent deployment, configuration, and management. Designed to be platform agnostic, Consul can be deployed in a variety of form factors, including major Public Cloud providers (AWS, GCP, Azure) as well as in bare-metal, virtual machine, and container (Docker, Kubernetes) environments. They include the Consul agent running in both client and server mode.
**Infrastructure**: There are two integration options in this category: natively through a direct integration with Consul or via Consul-Terraform-Sync (CTS). By leveraging Consul's powerful **Network Infrastructure Automation (NIA)*** capabilities through CTS, changes in the infrastructure are seamlessly automated when Consul detects a change in its service catalog. For example, these integrations could be used to automate IP updates of load balancers or firewall security policies by leveraging Consul service discovery.
-> **Network Infrastructure Automation (NIA)***: These integrations leverage Consul's service catalog to seamlessly integrate with Consul-Terraform-Sync (CTS) to automate changes in network infrastructure via a publisher-subscriber method. More details can be found [here](https://www.consul.io/docs/integrate/nia-integration).
## Development Process
The Consul integration development process is described in the steps below. By following these steps, Consul integrations can be developed alongside HashiCorp to ensure new integrations are reviewed, approved and released as quickly as possible.
<img src="/img/consul_integration_program_steps.png" alt="Integration Program Steps" style={{display:'block', margin:'auto', width:'624px'}}/>
1. Engage: Initial contact between vendor and HashiCorp
2. Enable: Documentation, code samples and best practices for developing the integration
3. Develop and Test: Integration development and testing by vendor
@ -43,71 +58,85 @@ Here are links to resources, documentation, examples and best practices to guide
#### Data Plane:
**Proxy**
- [How to Integrate a Sidecar Proxy Documentation](/docs/connect/proxies/integrate)
- [Example of Envoy Integration](/docs/connect/proxies/envoy)
**API Gateway**
- [Ambassador Integration documentation](https://learn.hashicorp.com/tutorials/consul/service-mesh-gateway-ambassador)
- [F5 Terminating Gateway Integration Documentation](https://www.hashicorp.com/integrations/f5-networks/consul)
- [Traefik Integration with Consul Service Mesh](https://traefik.io/blog/integrating-consul-connect-service-mesh-with-traefik-2-5/)
- [Kong's Ingress Controller Integration with Consul](https://www.hashicorp.com/integrations/kong/consul)
**Application Performance Monitoring (APM)**
- [Consul Telemetry Documentation](/docs/agent/telemetry)
- [Monitoring Consul with Datadog APM](https://www.datadoghq.com/blog/consul-datadog/)
- [Monitoring Consul with Dynatrace APM](https://www.dynatrace.com/news/blog/automatic-intelligent-observability-into-your-hashicorp-consul-service-mesh/)
**Logging**
- [Monitor Consul with Logz.io](https://www.hashicorp.com/integrations/logz-io/consul)
- [Monitor Consul with Splunk SignalFx](https://www.hashicorp.com/integrations/splunksignalfx/consul)
- [Consul Datacenter Monitoring with New Relic](https://www.hashicorp.com/integrations/new-relic/consul)
#### Platform:
- [Consul-AWS for AWS Cloud Map](https://learn.hashicorp.com/tutorials/consul/sync-aws-services)
- [Consul Integration with AWS ECS](/docs/ecs/get-started/install)
- [Consul Integration with Layer5 Meshery](https://www.hashicorp.com/integrations/layer5-io/consul)
- [Consul Integration with VMware Tanzu Application Service](https://learn.hashicorp.com/tutorials/consul/sync-pivotal-cloud-services)
#### Infrastructure:
-> **Note**: The types of integration areas below could be developed to work natively with Consul or by leveraging Consul-Terraform-Sync and Consul's network automation capabilities.
**Firewalls**
**Network Infrastructure Automation (using CTS):**
- [Automated Firewalling with Check Point](https://www.hashicorp.com/integrations/checkpoint-software/consul)
- [Automated Firewalling with Palo Alto Networks](https://www.hashicorp.com/integrations/pan/consul)
**Software-Defined Networking \(SDN\)**
- [Automating Cisco ACI with Consul](https://www.hashicorp.com/integrations/cisco/consul)
**Load Balancer**
- [Load Balancing with NGINX and Consul Template](https://learn.hashicorp.com/tutorials/consul/load-balancing-nginx)
- [Load Balancing with HAProxy Service Discovery](https://learn.hashicorp.com/tutorials/consul/load-balancing-haproxy)
**Proxy**
**Network Infrastructure Automation \(using CTS\):**
- [Automate F5 BIG-IP with Consul NIA](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-f5-bigip-fast?in=consul/network-infrastructure-automation)
- [Automate VMware Advanced Load Balancers (Avi) with Consul NIA](https://www.hashicorp.com/integrations/_vmware/consul)
- [How to Integrate a Sidecar Proxy Documentation](/docs/connect/proxies/integrate)
- [Example of Envoy Integration](/docs/connect/proxies/envoy)
- [Consuls source code](https://github.com/hashicorp/consul)
**Application Delivery Controllers \(ADC\):**
**Firewall**
- [Consul Connect Intentions](/docs/connect/intentions)
- [Consul Connect Intentions Command Line](/commands/intention)
- [Consul Connect Intentions API](/api/connect/intentions)
**API Gateway**
- [Ambassador Integration documentation](/docs/platform/k8s/ambassador)
#### Platform:
- [Consul-AWS for AWS Cloud Map](https://learn.hashicorp.com/tutorials/consul/sync-aws-services)
- [Consul Agent Cloud Auto-joining](/docs/agent/cloud-auto-join)
#### Infrastructure:
**Certificate Authority (CA)**
- [Consul Certificate Management Documentation](/docs/connect/ca)
- [Securing RPC Communication with TLS Encryption](https://learn.hashicorp.com/tutorials/consul/tls-encryption-secure)
- [Consul Connect CA API](/api/connect/ca)
**Identity and Access Management (IAM)**
- [ACL Documentation and Guides](/docs/acl)
- [ACL API Documentation](/api/acl/acl)
- [Securing Consul with ACLs](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production)
**Application Performance Monitoring (APM)**
- [Consul Telemetry Documentation](/docs/agent/telemetry)
- [Consul Cluster Monitoring and Metrics](https://learn.hashicorp.com/tutorials/consul/monitor-datacenter-health)
- [Monitoring Consul with Telegraf](https://learn.hashicorp.com/tutorials/consul/monitor-health-telegraf)
**Logging**
- [Consul Monitor Command Line](/commands/monitor)
- [Enable syslog via CLI](/docs/agent/options#enable_syslog)
- [Enable syslog via config file](/docs/agent/options#_syslog)
**Information Technology Service Management (ITSM)**
- [Consul Service Registry](https://learn.hashicorp.com/tutorials/consul/get-started-service-discovery)
- [DNS Query Interface](https://learn.hashicorp.com/tutorials/consul/get-started-service-discovery#querying-services)
- [HTTP API with Edge Triggers](https://learn.hashicorp.com/tutorials/consul/get-started-service-discovery#http-api)
- [Automate A10 ADC with Consul NIA](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-a10-adc?in=consul/network-infrastructure-automation)
- [Automate Citrix ADC with Consul NIA](https://www.hashicorp.com/integrations/citrix-adc/consul)
### 3. Develop and Test
The only knowledge necessary to write a plugin is basic command-line skills and knowledge of the [Go programming language](http://www.golang.org). Use the plugin interface to develop your integration. All integrations should contain unit and acceptance testing.
**HCP Consul**: The process to configure a testing instance of HCP Consul [is very simple](https://learn.hashicorp.com/tutorials/cloud/get-started-consul). HCP has been designed as a HashiCorp-managed service, so configuration is minimal: only Consul client agents need to be installed. Furthermore, HashiCorp provides all new users an initial credit, which should last approximately 2 months when using a [development cluster](https://cloud.hashicorp.com/pricing/consul). When deployed with AWS free tier services, there should be no cost beyond the time spent by the designated tester.
Please note that HCP Consul is currently only deployed on AWS, so the partner's application should be able to be deployed or run in AWS. For more information, please refer to [Peering an HVN to an AWS VPC for HCP Consul](https://www.youtube.com/watch?v=vuKjkIGYZlU).
#### HCP Consul Resource Links:
- [Getting Started with HCP Consul](https://learn.hashicorp.com/tutorials/cloud/get-started-consul?in=consul/cloud-get-started)
- [Peering an HVN to a VPC for HCP Consul](https://www.youtube.com/watch?v=vuKjkIGYZlU)
- [Connecting a Consul Client to HCP Consul](https://learn.hashicorp.com/tutorials/cloud/consul-client-virtual-machines?in=consul/cloud-get-started)
- [Monitoring HCP Consul with Datadog](https://docs.datadoghq.com/integrations/guide/hcp-consul/)
### 4. Review and Approval
HashiCorp will review and approve your Consul integration. Please send an email to [consul-integration-dev@hashicorp.com](mailto:consul-integration-dev@hashicorp.com) with any relevant documentation, demos or other resources and let us know your integration is ready for review.
HashiCorp will review and approve your Consul integration. Please send an email to [technologypartners@hashicorp.com](mailto:technologypartners@hashicorp.com) with any relevant documentation, demos or other resources and let us know your integration is ready for review.
### 5. Release
@ -125,9 +154,9 @@ Below is a checklist of steps that should be followed during the Consul integrat
- Complete the [Consul Integration Program webform](https://docs.google.com/forms/d/e/1FAIpQLSf-RyVR9F0lmosao8Nnur0TTDjnl99gttnK3QP1OkfRefVKSw/viewform)
- Develop and test your Consul integration following examples, documentation and best practices
- When the integration is completed and ready for HashiCorp review, send us the documentation, demos and any other resources for review at: [consul-integration-dev@hashicorp.com](mailto:consul-integration-dev@hashicorp.com)
- When the integration is completed and ready for HashiCorp review, send us the documentation, demos and any other resources for review at: [technologypartners@hashicorp.com](mailto:technologypartners@hashicorp.com)
- Plan to continue to support the integration with additional functionality and responding to customer issues.
## Contact Us
For any questions or feedback, please contact us at: [consul-integration-dev@hashicorp.com](mailto:consul-integration-dev@hashicorp.com)
For any questions or feedback, please contact us at: [technologypartners@hashicorp.com](mailto:technologypartners@hashicorp.com)

View File

@ -37,7 +37,7 @@ proxy when an instance goes unhealthy.
## Tasks
A task is the action triggered by the updated data monitored in Consul. It
takes the that dynamic service data and translates it into a call to the
takes the dynamic service data and translates it into a call to the
infrastructure application to configure it with the updates. It uses a driver
to push out these updates, the initial driver being a local Terraform run. An
example of a task is to automate a firewall security policy rule with
@ -48,13 +48,13 @@ discovered IP addresses for a set of Consul services.
A driver encapsulates the resources required to communicate the updates to the
network infrastructure. The following [drivers](/docs/nia/network-drivers#terraform) are supported:
* Terraform driver
* Terraform Cloud driver<EnterpriseAlert inline />
- Terraform driver
- Terraform Cloud driver<EnterpriseAlert inline />
Each driver includes a set of providers that [enables support](/docs/nia/terraform-modules) for a wide variety of infrastructure applications.
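For orientation, a driver is selected in the Consul-Terraform-Sync configuration file. The following is a minimal sketch that mirrors the configuration documentation; the provider name, source, and version are placeholders:

```hcl
# Minimal sketch: select the Terraform driver and declare one provider requirement.
driver "terraform" {
  required_providers {
    myprovider = {
      source  = "namespace/myprovider"
      version = "1.3.0"
    }
  }
}
```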
## Security Guidelines
The [Secure Consul-Terraform-Sync for Production](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-secure?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS)
tutorial contains a checklist of best practices to secure your
The [Secure Consul-Terraform-Sync for Production](https://learn.hashicorp.com/tutorials/consul/consul-terraform-sync-secure?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS)
tutorial contains a checklist of best practices to secure your
Consul-Terraform-Sync installation for a production environment.

View File

@ -14,20 +14,22 @@ The Consul-Terraform-Sync daemon is configured using configuration files and sup
Top level options are reserved for configuring Consul-Terraform-Sync.
```hcl
log_level = "INFO"
log_level = "INFO"
working_dir = "sync-tasks"
port = 8558
port = 8558
syslog {
facility = "local2"
}
buffer_period {
enabled = true
min = "5s"
max = "20s"
min = "5s"
max = "20s"
}
```
- `buffer_period` - Configures the default buffer period for all [tasks](#task) to dampen the effects of flapping services to downstream network devices. It defines the minimum and maximum amount of time to wait for the cluster to reach a consistent state and accumulate changes before triggering task executions. The default is enabled to reduce the number of times downstream infrastructure is updated within a short period of time. This is useful to enable in systems that have a lot of flapping.
- `buffer_period` - Configures the default buffer period for all dynamic [tasks](#task) to dampen the effects of flapping services to downstream network devices. It defines the minimum and maximum amount of time to wait for the cluster to reach a consistent state and accumulate changes before triggering task executions. The default is enabled to reduce the number of times downstream infrastructure is updated within a short period of time. This is useful to enable in systems that have a lot of flapping. Buffer periods do not apply to scheduled tasks.
- `enabled` - (bool: true) Enable or disable buffer periods globally. Specifying `min` will also enable it.
- `min` - (string: "5s") The minimum period of time to wait after changes are detected before triggering related tasks.
- `max` - (string: "20s") The maximum period of time to wait after changes are detected before triggering related tasks. If `min` is set, the default period for `max` is 4 times the value of `min`.
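As noted above, setting only `min` implicitly enables the buffer period. A minimal sketch:

```hcl
# Hedged sketch: only `min` is set, which enables buffering;
# `max` then defaults to four times `min` (40s here).
buffer_period {
  min = "10s"
}
```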
@ -89,8 +91,8 @@ A `service` block is an optional block to explicitly define configuration of ser
```hcl
service {
name = "web"
datacenter = "dc1"
name = "web"
datacenter = "dc1"
description = "all instances of the service web in datacenter dc1"
}
```
@ -100,7 +102,6 @@ service {
- `id` - (string) ID identifies the service for Consul-Terraform-Sync. This is used to explicitly identify the service config for a task to use. If no ID is provided, the service is identified by the service name within a [task definition](#task).
- `name` - (string: required) The Consul logical name of the service (required).
- `namespace` <EnterpriseAlert inline /> - (string: "default") The namespace of the service. If not provided, the namespace will be inferred from the Consul-Terraform-Sync ACL token, or default to the `default` namespace.
- `tag` - (string) **This field is deprecated in Consul-Terraform-Sync 0.2.0 and will be removed in 0.4.0. Use `filter` with the `Service.Tags` selector instead.** Tag is used to filter nodes based on the tag for the service.
- `filter` - (string) Specifies the expression used to filter nodes for the service. For more details on supported filters, see the Consul documentation on [filtering service nodes](/api-docs/health#filtering-2).
- `cts_user_defined_meta` - (map[string]) User-defined metadata is a map of strings that will be appended to the [service input variable](/docs/nia/installation/requirements#module-specifications) for compatible Terraform modules. Not all modules may use this value. To determine if your task uses metadata or what the expected keys and format are, reference documentation for the module(s) configured for your tasks.
- If multiple tasks depend on the same service but require different metadata, you can declare different sets of metadata for the same service. Define multiple service blocks for the service with unique IDs (and identical names) for those blocks. The metadata can then be separated per task based on the service IDs.
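As described above, the same service can be declared in multiple blocks with unique IDs so that different tasks receive different metadata. The following is a hedged sketch; the service name, IDs, and metadata keys are illustrative, and the exact map syntax should be checked against your Consul-Terraform-Sync version:

```hcl
# Two service blocks for the same logical service "web", distinguished by ID.
service {
  id   = "web-task-a"
  name = "web"
  cts_user_defined_meta = {
    owner = "team-a"
  }
}

service {
  id   = "web-task-b"
  name = "web"
  cts_user_defined_meta = {
    owner = "team-b"
  }
}
```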
@ -111,13 +112,13 @@ A `task` block configures which task to execute in automation. When the task sho
```hcl
task {
name = "taskA"
description = ""
enabled = true,
providers = []
services = ["web", "api"]
source = "org/example/module"
version = "1.0.0"
name = "taskA"
description = ""
enabled = true,
providers = []
services = ["web", "api"]
source = "org/example/module"
version = "1.0.0"
variable_files = []
condition "catalog-services" {
regexp = ".*"
@ -131,8 +132,10 @@ task {
- `providers` - (list[string]) Providers is the list of provider names the task is dependent on. This is used to map [Terraform provider configuration](#terraform-provider) to the task.
- `services` - (list[string]) Required depending on [`condition`](#condition) configuration. Services is the list of logical service names or service IDs the task executes on. Consul-Terraform-Sync monitors the Consul Catalog for changes to these services and triggers the task to run. Any service value not explicitly defined by a `service` block with a matching ID is assumed to be a logical service name in the default namespace. Alternative to configuring `services`, a `condition` can be configured so that the task does not trigger on changes to services (default behavior) but instead trigger on a different condition. See [Task Condition](#task-condition) configuration for more details.
- `source` - (string: required) Source is the location the driver uses to discover the Terraform module used for automation. The source is the module path which can be local or remote on the [Terraform Registry](https://registry.terraform.io/) or private module registry. Read more on [Terraform module source and other supported types here](https://www.terraform.io/docs/modules/sources.html).
- To use a private module with the [`terraform` driver](#terraform-driver), run the command [`terraform login [hostname]`](https://learn.hashicorp.com/tutorials/terraform/cloud-login) to authenticate the local Terraform CLI prior to starting Consul-Terraform-Sync.
- To use a private module with the [`terraform_cloud` driver](#terraform-cloud-driver), no extra steps are needed.
```hcl
// local module example: "./terraform-cts-hello"
source = "<PATH>"
@ -143,23 +146,30 @@ task {
// private module example: "my.tfe.hostname.io/my-org/hello/cts"
source = "<HOSTNAME>/<ORGANIZATION>/<MODULE NAME>/<PROVIDER>"
```
- `variable_files` - (list[string]) Specifies list of paths to [Terraform variable definition files (`.tfvars`)](https://www.terraform.io/docs/configuration/variables.html#variable-definitions-tfvars-files). The content of these files should consist of only variable name assignments. The variable assignments must match the corresponding variable declarations made available by the Terraform module for the task.
- Variables are loaded in the order they appear in the files. Duplicate variables are overwritten with the later value. _Unless specified by the module, configure arguments for Terraform providers using [`terraform_provider` blocks](#terraform-provider)._
<CodeBlockConfig filename="example.tfvars">
```hcl
# example.tfvars
address_group = "consul-services"
tags = [
"consul-terraform-sync",
"terraform"
]
```
</CodeBlockConfig>
- `version` - (string) The version of the provided source the task will use. For the [Terraform driver](#terraform-driver), this is the module version. The latest version will be used as the default if omitted.
- `working_dir` - (string) The working directory to manage generated artifacts by Consul-Terraform-Sync for this task, including Terraform configuration files. By default, a working directory is created for each task as a subdirectory in the base [`working_dir`](#working_dir), e.g. `sync-tasks/task-name`.
- `buffer_period` - Configures the buffer period for the task to dampen the effects of flapping services to downstream network devices. It defines the minimum and maximum amount of time to wait for the cluster to reach a consistent state and accumulate changes before triggering task execution. The default is inherited from the top level [`buffer_period` block](#global-config-options). If configured, these values will take precedence over the global buffer period. This is useful to enable for a task that is dependent on services that have a lot of flapping.
- `buffer_period` - Configures the buffer period for a dynamic task to dampen the effects of flapping services to downstream network devices. It defines the minimum and maximum amount of time to wait for the cluster to reach a consistent state and accumulate changes before triggering task execution. The default is inherited from the top level [`buffer_period` block](#global-config-options). If configured, these values will take precedence over the global buffer period. This is useful to enable for a task that is dependent on services that have a lot of flapping. Buffer periods do not apply to scheduled tasks.
- `enabled` - (bool) Enable or disable buffer periods for this task. Specifying `min` will also enable it.
- `min` - (string: "5s") The minimum period of time to wait after changes are detected before triggering related tasks.
- `max` - (string: "20s") The maximum period of time to wait after changes are detected before triggering related tasks. If `min` is set, the default period for `max` is 4 times the value of `min`.
- `condition` - (obj) The requirement that, when met, triggers Consul-Terraform-Sync to execute the task. When unconfigured, the default condition is to trigger the task on changes in the services configured in [`services`](#services). Only one `condition` may be configured per task. Consul-Terraform-Sync supports different types of conditions, which each have their own configuration options. See [Task Condition](#task-condition) configuration for full details on configuration options for each condition type.
- `source_input` - (obj) Specifies a Consul object containing values or metadata to be provided to the Terraform Module. When no source input is configured, it is determined by the condition or services list. Only one `source_input` may be configured per task. Consul-Terraform-Sync supports different types of source input, and each type has its own configuration options. The source input block is currently only supported with the [schedule condition](#schedule-condition). See [Task Source Input](#task-source-input) configuration for full details on configuration options for each source input type.
- `terraform_version` - (string) <EnterpriseAlert inline /> The version of Terraform to use for the Terraform Cloud workspace associated with the task. Defaults to the latest compatible version supported by the organization. This option is only available when used with the [Terraform Cloud driver](#terraform-cloud-driver); otherwise, set the version within the [Terraform driver](#terraform-driver).
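To illustrate the per-task override described in the `buffer_period` option above, the following is a hedged sketch of a task that dampens a flapping service; the task name, module path, and timings are placeholders:

```hcl
# Hedged sketch: a task-level buffer period overriding the global settings.
task {
  name     = "flappy-web-task"
  source   = "path/to/module"
  services = ["web"]

  buffer_period {
    min = "10s"
    max = "30s"
  }
}
```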
### Task Condition
@ -174,16 +184,17 @@ See [Task Execution: Services Condition](/docs/nia/tasks#services-condition) for
```hcl
task {
name = "services_condition_task"
name = "services_condition_task"
description = "execute on changes to services with names starting with web"
providers = ["my-provider"]
source = "path/to/services-condition-module"
providers = ["my-provider"]
source = "path/to/services-condition-module"
condition "services" {
regexp = "^web.*"
}
}
```
- `regexp` - **(beta)** (string) Only services that have a name which matches the regular expression are used by the task. If `regexp` is configured, then [`task.services`](#services) must be omitted or empty. If both a list and a regex are needed, consider including the list as part of the regex or creating separate tasks.
#### Catalog-Services Condition
@ -194,10 +205,10 @@ See [Task Execution: Catalog Services Condition](/docs/nia/tasks#catalog-service
```hcl
task {
name = "catalog_service_condition_task"
name = "catalog_service_condition_task"
description = "execute on service de/registrations with name matching 'web.*'"
source = "path/to/catalog-services-module"
providers = ["my-provider"]
source = "path/to/catalog-services-module"
providers = ["my-provider"]
// configure depending on module. provides detailed information for these
// services but does not execute task. refer to module docs on how to configure.
@ -221,22 +232,19 @@ task {
- `regexp` - (string) Optional if [`task.services`](/docs/nia/configuration#services) is configured. Either `regexp` or `task.services` or both must be configured. Only services that have a name which matches the regular expression are used by the task. If not provided, `regexp` will default to an exact match on `task.services` list. For example, if `task.services = ["api", "web"]`, then `regexp` will default to `^api$|^web$`. See [Task Execution: Catalog Services Condition](/docs/nia/tasks#catalog-services-condition) for more details on the relationship between `task.services` and `regexp`. Some resources for more information on regular expressions: [regular expression syntax](https://github.com/google/re2/wiki/Syntax), [try out regular expression string matching](https://golang.org/pkg/regexp/#Regexp.MatchString).
- `source_includes_var` - (bool: false) Whether or not the module configured at [`task.source`](#source) includes the [`catalog_services` variable](/docs/nia/terraform-modules#catalog-services-variable). Please refer to the documentation of the selected module for guidance on how to configure this field. If configured inconsistently with the module, Consul-Terraform-Sync will error and exit.
#### Consul KV Condition
-> **Beta:** This feature is currently only available in Consul-Terraform-Sync v0.4.0-beta.
A consul-kv condition block configures a task to only execute on changes to a Consul KV entry. The condition can be configured for a single Consul KV entry or for any Consul KV entries that are prefixed with a given path.
See [Task Execution: Consul KV Condition](/docs/nia/tasks#consul-kv-condition) for more information on how tasks are triggered with a consul-kv condition.
```hcl
task {
name = "consul_kv_condition_task"
name = "consul_kv_condition_task"
description = "execute on changes to Consul KV entry"
source = "path/to/consul-kv-module"
providers = ["my-provider"]
services = ["web-api"]
source = "path/to/consul-kv-module"
providers = ["my-provider"]
services = ["web-api"]
condition "consul-kv" {
path = "my-key"
@ -252,7 +260,95 @@ task {
- `recurse` - (bool: false) Setting to `true` instructs Consul-Terraform-Sync to treat the path as a prefix instead of a literal match.
- `datacenter` - (string) The datacenter of the services to query for the task. If not provided, the datacenter will default to the datacenter of the agent that Consul-Terraform-Sync queries.
- `namespace` <EnterpriseAlert inline /> - (string) The namespace of the services to query for the task. If not provided, the namespace will be inferred from the Consul-Terraform-Sync ACL token or default to the `default` namespace.
- `source_includes_var` - (bool: false) If set to `true`, then Consul-Terraform-Sync will include the [`consul_kv` variable](/docs/nia/terraform-modules#consul-kv-variable) as an input to the module specified in the [`task.source`](#source) field. Refer to the documentation of the selected module for guidance on how to configure this field. If configured inconsistently with the module, Consul-Terraform-Sync will error and exit.
- `source_includes_var` - (bool: false) If set to `true`, then Consul-Terraform-Sync will include the [`consul_kv` variable](/docs/nia/terraform-modules#consul-kv-variable) as an input to the module specified in `task.source`. Refer to the documentation of the selected module for guidance on how to configure this field. If configured inconsistently with the module, Consul-Terraform-Sync will error and exit.
#### Schedule Condition
A scheduled task has a schedule condition block, which defines the schedule for executing the task. Unlike a dynamic task, a scheduled task does not dynamically trigger on changes in Consul.
Scheduled tasks also rely on additional task configuration, separate from the condition block, to determine the source input information to provide to the task's module. See the [`task.services`](#services) or [`source_input`](#source_input) block configuration for details on how to configure source input.
See [Task Execution: Schedule Condition](/docs/nia/tasks#schedule-condition) for more information on how tasks are triggered with schedule conditions.
See [Terraform Module: Source Input](/docs/nia/terraform-modules#source-input) for more information on source input options for a scheduled task.
```hcl
task {
name = "scheduled_task"
description = "execute every Monday using service information from web and db"
services = ["web", "db"]
source = "path/to/module"
condition "schedule" {
cron = "* * * * Mon"
}
}
```
- `cron` - (string: required) The CRON expression that dictates the schedule to trigger the task. For more information on CRON expressions, see the [cronexpr parsing library](https://github.com/hashicorp/cronexpr).
### Task Source Input
You can add an optional `source_input` block to the `task` block. The `source_input` block specifies a Consul object containing values or metadata to be provided to the Terraform Module. Consul-Terraform-Sync supports the following source input types.
~> **The source input block is currently only supported when using a schedule condition.** Adding a `source_input` block alongside any other type of condition will result in an error. To accomplish a similar behavior with other condition blocks, use the `source_includes_var` field.
#### Services Source Input
This `services` source input object defines services registered to Consul whose metadata will be used as [services source input to the Terraform Module](/docs/nia/terraform-modules/#services-source-input). The following parameters are supported:
| Parameter | Required |Description | Default |
| --------- | --------- | ---------- | ------- |
| `regexp` | Optional | String value matching the names of Consul services to monitor. Only services that have a name matching the regular expression are used by the task. <br/><br/> If `regexp` is configured, then [`task.services`](#services) must be omitted or empty. <br/><br/> If both a list and a regex are needed, consider including the list as part of the regex or creating separate tasks. | none |
In the following example, the scheduled task queries all Consul services whose names start with `web`. The metadata of matching services is provided to the Terraform module.
```hcl
task {
name = "schedule_condition_task"
description = "execute every Monday using information from service names starting with web"
source = "path/to/module"
condition "schedule" {
cron = "* * * * Mon"
}
source_input "services" {
regexp = "^web.*"
}
}
```
#### Consul KV Source Input
A Consul KV source input block defines changes to Consul KV that will be monitored. These changes will then be provided as [Consul KV source input to the Terraform Module](/docs/nia/terraform-modules/#consul-kv-source-input). The source input can be configured for a single Consul KV entry or for any Consul KV entries that are prefixed with a given path. The following parameters are supported:
| Parameter | Required | Description | Default |
| --------- | -------- | ----------- | ------- |
| `path` | Required | String value that specifies the path of the key used by the task. The path can point to a single Consul KV entry or several entries within the path. | none |
| `recurse` | Optional | Boolean value that enables Consul-Terraform-Sync to treat the path as a prefix. If set to `false`, the path will be treated as a literal match. | `false` |
| `datacenter` | Optional | String value specifying the name of a datacenter to query for the task. | Datacenter of the agent that Consul-Terraform-Sync queries. |
| `namespace` | Optional | <EnterpriseAlert inline /> <br/> String value indicating the namespace of the services to query for the task. | In order of precedence: <br/> 1. Inferred from the Consul-Terraform-Sync ACL token <br/> 2. The `default` namespace. |
In the following example, the scheduled task queries datacenter `dc1` in the `default` namespace for changes to the value held by the key `my-key`.
```hcl
task {
name = "schedule_condition_task_kv"
description = "execute every Monday using information from Consul KV entry my-key"
source = "path/to/module"
condition "schedule" {
cron = "* * * * Mon"
}
source_input "consul-kv" {
path = "my-key"
recurse = false
datacenter = "dc1"
namespace = "default"
}
}
```
## Network Drivers
@ -266,9 +362,9 @@ The Terraform driver block is used to configure Consul-Terraform-Sync for instal
```hcl
driver "terraform" {
log = false
log = false
persist_log = false
path = ""
path = ""
backend "consul" {
gzip = true
@ -276,7 +372,7 @@ driver "terraform" {
required_providers {
myprovider = {
source = "namespace/myprovider"
source = "namespace/myprovider"
version = "1.3.0"
}
}
@ -304,8 +400,6 @@ driver "terraform" {
which is available with <strong>Consul Enterprise</strong>.
</EnterpriseAlert>
-> **Beta:** The integration with the HashiCorp managed service version of Terraform Cloud is currently only available in Consul-Terraform-Sync v0.4.0-beta. Integration with the self-hosted version of Terraform Cloud is available as of v0.3.0.
The Terraform Cloud driver enables Consul-Terraform-Sync Enterprise to integrate with **Terraform Cloud**, including both the [self-hosted distribution](https://www.hashicorp.com/products/terraform/editions/enterprise) and the [managed service](https://www.hashicorp.com/products/terraform/editions/cloud). With this driver, Consul-Terraform-Sync automates Terraform runs and remote operations for workspaces.
An overview of features enabled with Terraform Cloud can be viewed within the [Network Drivers](/docs/nia/network-drivers) documentation.
@ -314,7 +408,7 @@ Only one network driver can be configured per deployment of Consul-Terraform-Syn
```hcl
driver "terraform-cloud" {
hostname = "my.tfe.hostname.io"
hostname = "https://app.terraform.io"
organization = "my-org"
token = "<TEAM_TOKEN>"
// Optionally set the token to be securely queried from Vault instead of
@ -323,7 +417,7 @@ driver "terraform-cloud" {
required_providers {
myprovider = {
source = "namespace/myprovider"
source = "namespace/myprovider"
version = "1.3.0"
}
}
@ -336,6 +430,20 @@ driver "terraform-cloud" {
- We recommend creating a dedicated team and team API token to isolate automation by Consul-Terraform-Sync from other Terraform Cloud operations.
- `workspace_prefix` - (string) Specifies a prefix to prepend to the automatically-generated workspace names used for automation. This prefix will be used by all tasks that use this driver. By default, when no prefix is configured, the workspace name will be the task name. When a prefix is configured, the workspace name will be `<workspace_prefix value><task name>`. For example, if you configure the prefix as "cts_", then a task with the name "task_firewall" will have the workspace name "cts_task_firewall".
- `required_providers` - (obj: required) Declare each Terraform provider used across all tasks. This can be configured the same as how you would configure [Terraform `terraform.required_providers`](https://www.terraform.io/docs/configuration/provider-requirements.html#requiring-providers) field to specify the source and version for each provider. Consul-Terraform-Sync will process these requirements when preparing each task that uses the provider.
- `tls` - Configure TLS to allow HTTPS connections to [Terraform Enterprise](https://www.terraform.io/docs/enterprise/install/installer.html#tls-key-amp-cert).
- `enabled` - (bool) Enable TLS. Providing a value for any of the TLS options will enable this parameter implicitly.
- `ca_cert` - (string) The CA file to use for communicating with Terraform Enterprise over TLS.
- `ca_path` - (string) The path to a directory of CA certificates to use for communicating with Terraform Enterprise over TLS.
- `cert` - (string) The client certificate file to use for communicating with Terraform Enterprise over TLS.
- `key` - (string) The client key file to use for communicating with Terraform Enterprise over TLS.
- `server_name` - (string) The server name to use as the SNI host when connecting via TLS.
- `verify` - (bool: true) Enables TLS peer verification. The default is enabled, which will check the global CA chain to make sure the given certificates are valid.
- If you are using a self-signed certificate that you have not added to the CA chain, you may want to disable SSL verification to ignore any certificate warnings. However, please understand this is a potential security vulnerability.
```hcl
tls {
verify = false
}
```
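As a sketch combining the options above (the hostname, file paths, and token are placeholders, not values from this document), a fuller TLS configuration might look like:

```hcl
driver "terraform-cloud" {
  hostname     = "my.tfe.hostname.io"
  organization = "my-org"
  token        = "<TEAM_TOKEN>"

  tls {
    enabled     = true
    ca_cert     = "/etc/terraform-enterprise/ca.pem"
    cert        = "/etc/terraform-enterprise/client.pem"
    key         = "/etc/terraform-enterprise/client-key.pem"
    server_name = "my.tfe.hostname.io"
    verify      = true
  }
}
```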
Consul-Terraform-Sync generates local artifacts to prepare configuration versions used for workspace runs. The location of the files created can be set with the [`working_dir`](/docs/nia/configuration#working_dir) option or configured per task. When a task is configured with a local module and is run with the Terraform Cloud driver, the local module is copied and uploaded as a part of the configuration version.
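For example, a global default location can be set and then overridden per task; the directory paths below are placeholders:

```hcl
# Global default location for generated artifacts
working_dir = "sync-tasks"

task {
  name     = "website-x"
  source   = "namespace/example/module"
  services = ["web", "api"]

  # Override the artifact location for this task only
  working_dir = "sync-tasks/website-x"
}
```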
@ -351,21 +459,21 @@ The below configuration captures the general design of defining a provider using
driver "terraform" {
required_providers {
aws = {
source = "hashicorp/aws"
source = "hashicorp/aws"
version = "3.33.0"
}
}
}
terraform_provider "aws" {
# Configuration options
// Configuration options
region = "us-east-1"
}
task {
source = "some/source"
source = "some/source"
providers = ["aws"]
services = ["web", "api"]
services = ["web", "api"]
}
```
@ -471,7 +579,7 @@ The example Consul-Terraform-Sync configuration below defines two similar tasks
```hcl
terraform_provider "aws" {
alias = "a"
alias = "a"
profile = "team-a"
task_env {
"AWS_ACCESS_KEY_ID" = "{{ env \"CTS_AWS_ACCESS_KEY_ID_A\" }}"
@ -479,7 +587,7 @@ terraform_provider "aws" {
}
terraform_provider "aws" {
alias = "b"
alias = "b"
profile = "team-b"
task_env {
"AWS_ACCESS_KEY_ID" = "{{ env \"CTS_AWS_ACCESS_KEY_ID_B\" }}"
@ -491,15 +599,15 @@ terraform_provider "dns" {
}
task {
name = "task-a"
source = "org/module"
name = "task-a"
source = "org/module"
providers = ["aws.a", "dns"]
// ...
}
task {
name = "task-b"
source = "org/module"
name = "task-b"
source = "org/module"
providers = ["aws.b", "dns"]
// ...
}


@ -19,12 +19,22 @@ Consul-Terraform-Sync executes one or more automation tasks with the most recent
## Glossary
**Condition** - A task-level defined environmental requirement that, when met, triggers the Consul-Terraform-Sync binary to execute the related task to update network infrastructure.
**Condition** - A task-level defined environmental requirement. When a task's condition is met, Consul-Terraform-Sync executes that task to update network infrastructure. Depending on the condition type, the condition definition may also define and enable the source input that the task provides to the configured Terraform Module.
**Consul-Terraform-Sync** - [GitHub repo](https://github.com/hashicorp/consul-terraform-sync) and binary/CLI name for the project that is used to perform Network Infrastructure Automation.
**Dynamic Tasks** - A dynamic task is a type of task that is dynamically triggered by a change to any relevant Consul catalog values, e.g., service instances, Consul KV, or catalog-services. See scheduled tasks for a type of non-dynamic task.
-> **Note:** The terminology "tasks" used throughout the documentation refers to all types of tasks except when specifically stated otherwise.
**Network Drivers** - Consul-Terraform-Sync uses [network drivers](/docs/nia/network-drivers) to execute and update network infrastructure. Drivers transform Consul service-level information into downstream changes by processing and abstracting API and resource details tied to specific network infrastructure.
**Scheduled Tasks** - A scheduled task is a type of task that is triggered only on a schedule. It is configured with a [schedule condition](/docs/nia/configuration#schedule-condition).
-> **Note:** The terminology "tasks" used throughout the documentation refers to all types of tasks except when specifically stated otherwise.
**Source Input** - A source input defines objects that provide values or metadata to the Terraform module. See [source input](/docs/nia/terraform-modules#source-input) for the supported metadata and values. For example, a user can configure a Consul KV source input to provide KV pairs as variables to their respective Terraform module. The source input can be included in two ways: as a parameter in a condition using `source_includes_var`, or by using the `source_input` block.
**Network Infrastructure Automation (NIA)** - Enables dynamic updates to network infrastructure devices triggered when specific conditions, such as service changes and registration, are met.
**Tasks** - A task is the translation of dynamic service information from the Consul Catalog into network infrastructure changes downstream.
@ -37,11 +47,11 @@ Consul-Terraform-Sync executes one or more automation tasks with the most recent
## Getting Started With Network Infrastructure Automation
The [Network Infrastructure Automation (NIA)](https://learn.hashicorp.com/collections/consul/network-infrastructure-automation?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS)
collection contains examples on how to configure Consul-Terraform-Sync to
perform Network Infrastructure Automation. The collection also contains a
tutorial to secure your Consul-Terraform-Sync configuration for a production
environment and one to help you build your own Consul-Terraform-Sync compatible
module.
## Community


@ -19,12 +19,12 @@ Review the Terraform module to be used for network automation and identify the T
```hcl
task {
name = "website-x"
name = "website-x"
description = "automate services for website-x"
source = "namespace/example/module"
version = "1.0.0"
providers = ["myprovider"]
services = ["web", "api"]
source = "namespace/example/module"
version = "1.0.0"
providers = ["myprovider"]
services = ["web", "api"]
}
```
@ -36,7 +36,7 @@ Configuring Terraform providers within Consul-Terraform-Sync requires 2 config c
driver "terraform" {
required_providers {
myprovider = {
source = "namespace/myprovider"
source = "namespace/myprovider"
version = "1.3.0"
}
}
@ -59,6 +59,8 @@ Piecing it all together, the configuration file for Consul-Terraform-Sync will h
An example HCL configuration file is shown below that automates one task to execute a Terraform module when there are changes to two services.
<CodeBlockConfig filename="cts-example-config.hcl">
```hcl
log_level = "info"
@ -71,12 +73,12 @@ consul {
}
task {
name = "website-x"
name = "website-x"
description = "automate services for website-x"
source = "namespace/example/module"
version = "1.0.0"
providers = ["myprovider"]
services = ["web", "api"]
source = "namespace/example/module"
version = "1.0.0"
providers = ["myprovider"]
services = ["web", "api"]
buffer_period {
min = "10s"
}
@ -87,7 +89,7 @@ driver "terraform" {
required_providers {
myprovider = {
source = "namespace/myprovider"
source = "namespace/myprovider"
version = "1.3.0"
}
}
@ -97,3 +99,5 @@ terraform_provider "myprovider" {
address = "myprovider.example.com"
}
```
</CodeBlockConfig>

View File

@ -46,7 +46,7 @@ Consul-Terraform-Sync connects with your Consul cluster in order to monitor the
```hcl
consul {
address = "localhost:8500"
token = "my-consul-acl-token"
token = "my-consul-acl-token"
}
```
@ -58,7 +58,7 @@ Once you have identified a Terraform provider for all of your network devices, y
```hcl
terraform_provider "fake-firewall" {
address = "10.10.10.10"
address = "10.10.10.10"
username = "admin"
password = "password123"
}


@ -13,8 +13,6 @@ Consul-Terraform-Sync is a HashiCorp solution to Network Infrastructure Automati
The following table highlights some of the additional features Terraform and Terraform Cloud offer when used as a network driver for Consul-Terraform-Sync. Visit the [Terraform product page](https://www.hashicorp.com/products/terraform) or [contact our sales team](https://www.hashicorp.com/contact-sales) for a comprehensive list of features.
-> **Beta:** The integration with the HashiCorp managed service version of Terraform Cloud is currently only available in Consul-Terraform-Sync v0.4.0-beta. Integration with the self-hosted version of Terraform Cloud is available as of v0.3.0.
| Network Driver | Description | Features |
| -------------- | ----------- | -------- |
| [Terraform driver](/docs/nia/network-drivers/terraform) | Consul-Terraform-Sync automates a local installation of the [Terraform CLI](https://www.terraform.io/) | - Local Terraform execution <br/> - Local workspace directories <br/> - [Backend options](/docs/nia/configuration#backend) available for state storage <br/> |


@ -12,9 +12,7 @@ description: >-
which is available with <strong>Consul Enterprise</strong>.
</EnterpriseAlert>
-> **Beta:** The integration with the HashiCorp managed service version of Terraform Cloud is currently only available in Consul-Terraform-Sync v0.4.0-beta. Integration with the self-hosted version of Terraform Cloud is available as of v0.3.0.
Consul-Terraform-Sync is more powerful when you integrate it with [Terraform Cloud](https://www.terraform.io/cloud). Integrating with Terraform Cloud provides features, such as enhanced workspaces and insight into Terraform operations as Consul-Terraform-Sync dynamically updates your network infrastructure. Consul-Terraform-Sync is compatible with both the [self-hosted](https://www.hashicorp.com/products/terraform/editions/enterprise) and [managed service](https://www.hashicorp.com/products/terraform/editions/cloud) versions of Terraform Cloud.
Consul-Terraform-Sync is more powerful when you integrate it with [Terraform Cloud](https://www.terraform.io/cloud). Integrating with Terraform Cloud provides features, such as enhanced workspaces and insight into Terraform operations as Consul-Terraform-Sync dynamically updates your network infrastructure. Consul-Terraform-Sync is compatible with both the [self-hosted](https://www.hashicorp.com/products/terraform/editions/enterprise) and [managed service](https://www.hashicorp.com/products/terraform/editions/cloud) versions of Terraform Cloud. It also supports all [tiers](https://www.hashicorp.com/products/terraform/pricing) of the Terraform Cloud managed service.
This page describes how the Terraform Cloud driver operates within Consul-Terraform-Sync.


@ -13,12 +13,12 @@ Below is an example task configuration:
```hcl
task {
name = "frontend-firewall-policies"
name = "frontend-firewall-policies"
description = "Add firewall policy rules for frontend services"
providers = ["fake-firewall", "null"]
services = ["web", "image"]
source = "example/firewall-policy/module"
version = "1.0.0"
providers = ["fake-firewall", "null"]
services = ["web", "image"]
source = "example/firewall-policy/module"
version = "1.0.0"
}
```
@ -30,9 +30,13 @@ A task can be either enabled or disabled using the [task cli](/docs/nia/cli/task
## Task Execution
An enabled task can be configured to monitor and execute on different types of conditions. For example, on changes to services ([services condition](/docs/nia/tasks#services-condition)) or on service registration and deregistration ([catalog-services condition](/docs/nia/tasks#catalog-services-condition)).
An enabled task can be configured to monitor and execute on different types of conditions, such as changes to services ([services condition](/docs/nia/tasks#services-condition)) or service registration and deregistration ([catalog-services condition](/docs/nia/tasks#catalog-services-condition)).
A task can also monitor but not execute on other variables that are simply included to provide additional information to the task's module. For example, a task with a catalog-services condition may execute on registration changes but additionally monitor service instances for IP information. The details of what values are monitored and what values can execute the task are determined by the module.
A task can also monitor, but not execute on, other variables that provide additional information to the task's module. For example, a task with a catalog-services condition may execute on registration changes, and monitor service instances for IP information.
A source input can be specified that implicitly includes variables to be provided to the task's module. For example, a task can specify a Consul KV source input. The specified KV keys or key paths would be monitored for changes. Any changes detected would be included as input information for the module. The module determines the details of what values are monitored and what values can execute the task.
~> **The source input block is currently only supported when using a schedule condition.** Adding a source input block alongside any other type of condition will result in an error. To accomplish a similar behavior with other condition blocks, use the `source_includes_var` field.
Below are details on the types of execution conditions that Consul-Terraform-Sync supports.
@ -65,22 +69,21 @@ The services condition is the default behavior if no `condition` block is config
```hcl
task {
name = "services_condition_task_1"
name = "services_condition_task_1"
description = "execute on changes to api, db, and web services"
providers = ["my-provider"]
source = "path/to/services-condition-module"
services = ["api", "db", "web"]
providers = ["my-provider"]
source = "path/to/services-condition-module"
services = ["api", "db", "web"]
}
task {
name = "services_condition_task_2"
name = "services_condition_task_2"
description = "execute on changes to api, db, and web services"
providers = ["my-provider"]
source = "path/to/services-condition-module"
services = ["api", "db", "web"]
providers = ["my-provider"]
source = "path/to/services-condition-module"
services = ["api", "db", "web"]
condition "services" {
}
condition "services" {}
}
```
@ -88,10 +91,10 @@ Below is an example configuration for a task that will execute when a service wi
```hcl
task {
name = "services_condition_task"
name = "services_condition_task"
description = "execute on changes to services whose name starts with web"
providers = ["my-provider"]
source = "path/to/services-condition-module"
providers = ["my-provider"]
source = "path/to/services-condition-module"
condition "services" {
regexp = "^web.*"
@ -106,12 +109,13 @@ Tasks with a catalog-services condition monitor and execute on service registrat
The catalog-services condition operates by monitoring the [Catalog List Services API](/api-docs/catalog#list-services) and executing the task when services are added or removed from the list of registered services. Note that the task does not execute on changes to the tags of the listed services. This is similar to how changes to service instance information, mentioned above, also do not execute a task.
Below is an example configuration for a task that will execute when a service with a name that matches the "web.*" regular expression in datacenter "dc1" has a registration change. It additionally monitors but does not execute on service instance changes to "web-api" in datacenter "dc2".
```hcl
task {
name = "catalog_service_condition_task"
source = "path/to/catalog-services-module"
name = "catalog_service_condition_task"
source = "path/to/catalog-services-module"
providers = ["my-provider"]
services = ["web-api"]
services = ["web-api"]
condition "catalog-services" {
datacenter = "dc1"
@ -121,7 +125,7 @@ task {
}
service {
name = "web-api"
name = "web-api"
datacenter = "dc2"
}
```
@ -134,19 +138,17 @@ One particular condition configuration, [`regexp`](/docs/nia/configuration#regex
### Consul KV Condition
-> **Beta:** This feature is currently only available in Consul-Terraform-Sync v0.4.0-beta.
Tasks with a consul-kv condition monitor and execute on Consul KV changes for KV pairs that satisfy the condition configuration. The consul-kv condition operates by monitoring the [Consul KV API](/api-docs/kv#read-key) and executing the task when a configured KV entry is created, deleted, or updated.
Based on the `recurse` option, the condition either monitors a single Consul KV pair for a given path or monitors all pairs that are prefixed by that path. In the example below, because `recurse` is set to true, the `path` option is treated as a prefix. Changes to an entry with the key `my-key` and an entry with the key `my-key/another-key` would both trigger the task. If `recurse` were set to false, then only changes to `my-key` would trigger the task.
```hcl
task {
name = "consul_kv_condition_task"
name = "consul_kv_condition_task"
description = "execute on changes to Consul KV entry"
source = "path/to/consul-kv-module"
providers = ["my-provider"]
services = ["web-api"]
source = "path/to/consul-kv-module"
providers = ["my-provider"]
services = ["web-api"]
condition "consul-kv" {
path = "my-key"
@ -160,6 +162,50 @@ task {
If the task condition's [`source_includes_var`](/docs/nia/configuration#source_includes_var-1) field is set to `true`, then the value of the Consul KV pair(s) will be available in the [`consul_kv` input variable](/docs/nia/terraform-modules#consul-kv-variable). To use the variable, add `consul_kv` as an input variable to the module, in addition to the required `services` variable. The condition type and `source_includes_var` configuration value should be documented in the module so that users can reference them when configuring a task.
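For example, a minimal sketch of a task that pairs the condition with a module declaring the `consul_kv` variable is shown below; the module path and key name are illustrative:

```hcl
task {
  name     = "consul_kv_condition_task"
  source   = "path/to/consul-kv-module"
  services = ["web-api"]

  condition "consul-kv" {
    path                = "my-key"
    source_includes_var = true
  }
}
```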
### Schedule Condition
All scheduled tasks must be configured with a schedule condition. The schedule condition sets the cadence to trigger a task with a [`cron`](/docs/nia/configuration#cron) configuration. The schedule condition block does not support parameters to configure source input. As a result, inputs must be configured separately. You can configure [`task.services`](/docs/nia/configuration#services) or a [`source_input` block](/docs/nia/configuration#source_input) to set the source input.
Below is an example configuration for a task that will execute every Monday, which is set by the schedule condition's [`cron`](/docs/nia/configuration#cron) configuration. The source input is defined by the `task.services` configuration. When the task is triggered on Monday, it will retrieve the latest information on "web" and "db" from Consul and provide this to the module's input variables.
```hcl
task {
name = "scheduled_task"
description = "execute every Monday using service information from web and db"
services = ["web", "db"]
source = "path/to/module"
condition "schedule" {
cron = "* * * * Mon"
}
}
```
Below are the available options for source input types and how to configure them:
- [Services source input](/docs/nia/terraform-modules/#services-source-input): configure through [`task.services`](/docs/nia/configuration#services) or [`source_input "services"`](/docs/nia/configuration#services-source-input)
- [Consul KV source input](/docs/nia/terraform-modules/#consul-kv-source-input): configure through [`source_input "consul-kv"`](/docs/nia/configuration#consul-kv-source-input)
#### Running Behavior
Scheduled tasks generally run on schedule, but they can be triggered on demand when running Consul-Terraform-Sync in the following ways:
- [Long-running mode](/docs/nia/cli#long-running-mode): At the beginning of the long-running mode, Consul-Terraform-Sync first passes through a once-mode phase in which all tasks are executed once. Scheduled tasks will trigger once during this once-mode phase. This behavior also applies to tasks that are not scheduled. After once-mode has completed, scheduled tasks subsequently trigger on schedule.
- [Inspect mode](/docs/nia/cli#inspect-mode): When running in inspect mode, the terminal will output a plan of proposed updates that would be made if the tasks were to trigger at that moment and then exit. No changes are applied in this mode. The outputted plan for a scheduled task is also the proposed updates that would be made if the task was triggered at that moment, even if off-schedule.
- [Once mode](/docs/nia/cli#once-mode): During the once mode, all tasks are only triggered one time. Scheduled tasks will execute during once mode even if not on the schedule.
- [Enable CLI](/docs/nia/cli/task#task-enable): When a task is enabled through the CLI, any type of task, including scheduled tasks, will be triggered at that time.
#### Buffer Period
Because scheduled tasks trigger on a configured cadence, buffer periods are disabled for scheduled tasks. Any configured `buffer_period` at the global level or task level will only apply to dynamic tasks and not scheduled ones.
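For example, in the following sketch the global `buffer_period` applies to the dynamic task but is ignored by the scheduled task; the task names and values are illustrative:

```hcl
buffer_period {
  enabled = true
  min     = "5s"
  max     = "20s"
}

task {
  name     = "dynamic_task"
  source   = "path/to/module"
  services = ["web", "api"]
  # Executions of this dynamic task are buffered using the global buffer_period above
}

task {
  name   = "scheduled_task"
  source = "path/to/module"
  condition "schedule" {
    cron = "* * * * Mon"
  }
  # The global buffer_period is not applied to this scheduled task
}
```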
#### Events
[Events](#event) are stored each time a task executes. For scheduled tasks, an event will be stored each time the task triggers on schedule, regardless of whether there was a change in the Consul catalog.
## Task Automation
Consul-Terraform-Sync will attempt to execute each enabled task once upon startup to synchronize infrastructure with the current state of Consul. The daemon will stop and exit if any error occurs while preparing the automation environment or executing a task for the first time. This helps ensure tasks have proper configuration and are executable before the daemon transitions into running tasks in full automation as service changes are discovered over time. As a result, it is not recommended to configure a task as disabled from the start. After all tasks have successfully executed once, task failures during automation will be logged and retried or attempted again after a subsequent change.
@ -169,17 +215,21 @@ Tasks are executed near-real time when service changes are detected. For service
## Status Information
Status-related information is collected and offered via the [status API](/docs/nia/api#status) to provide visibility into what and how the tasks are running. Information is offered in three levels (lowest to highest):
- Event data
- Task status
- Overall status
- Event data
- Task status
- Overall status
These three levels form a hierarchy where each level of data informs the one above it. The lowest level, event data, is collected each time a task runs to update network infrastructure. This event data is then aggregated to inform individual task statuses. The count distribution of all the task statuses informs the overall status's task summary.
### Event
Each time a task's services has an update, Consul-Terraform-Sync takes a series of steps in order to update network infrastructure. This process starts with updating the task's templates to fetch new service data from Consul and ends with any post-actions after modifying network infrastructure. An event is a data structure that captures information on this process of updating network infrastructure. It stores information to help understand if the update to network infrastructure was successful or not, and it stores any errors that occurred. Because disabled tasks do not update network infrastructures, they therefore do not have store events until re-enabled.
When a task is triggered, Consul-Terraform-Sync takes a series of steps in order to update the network infrastructure. These steps consist of fetching the latest data from Consul for the task's source inputs and then updating the network infrastructure accordingly. An event captures information across this process. It stores information to help understand if the update to network infrastructure was successful or not and any errors that may have occurred.
A dynamic task will store an event when it is triggered by a change in Consul. A scheduled task will store an event when it is triggered on schedule, regardless of whether there is a change in Consul. A disabled task does not update network infrastructures, so it will not store events until re-enabled.
Sample event:
```json
{
"id": "ef202675-502f-431f-b133-ed64d15b0e0e",
@ -201,17 +251,14 @@ For complete information on the event structure, see [events in our API document
Each time a task runs to update network infrastructure, event data is stored for that run. The 5 most recent events are stored for each task, and these stored events are used to determine task status. For example, if the most recent stored event is not successful but the others are, then the task's health status is "errored".
Sample task status:
```json
{
"task_name": "task_b",
"status": "errored",
"providers": [
"null"
],
"services": [
"web",
],
"events_url": "/v1/status/tasks/task_b?include=events",
"task_name": "task_b",
"status": "errored",
"providers": ["null"],
"services": ["web"],
"events_url": "/v1/status/tasks/task_b?include=events"
}
```
@ -222,6 +269,7 @@ Task status information can be retrieved with [task status API](/docs/nia/api#ta
Overall status returns a summary of the health statuses across all tasks. The summary is the count of tasks in each health status category.
Sample overall status:
```json
{
"task_summary": {


@ -22,9 +22,203 @@ Below are the two required elements for module compatibility with Consul-Terrafo
### Optional Input Variables
Below are additional input variables provided by Consul-Terraform-Sync that may be included in a module alongside the `services` input variable. Details for each input variable contain guidance on when to include them in a module.
In addition to the required `services` input variable, Consul-Terraform-Sync provides additional, optional input variables to be used within your module. Support for an optional input variable requires two changes:
- [**`catalog_services` input variable**](#catalog-services-variable) - This variable can optionally be included in modules for the [catalog-services condition](/docs/nia/tasks#catalog-services-condition). The declaration of the `catalog_services` input variable can be included at the top of the suggested `variables.tf` file alongside the required `services` input variable and other input variables. This variable functions as the response object from the [Consul catalog list services API](/api-docs/catalog#list-services) and surfaces registered services information. It is structured as a map of lists.
1. Updating the Terraform Module to declare the input variable in the suggested `variables.tf`
1. Adding configuration to the Consul-Terraform-Sync task block to define the source input values that should be provided to the input variables
See below sections for more information on [defining source input](#source-input) and [declaring optional input variables](#how-to-create-a-compatible-terraform-module) in your Terraform module.
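As a rough sketch, the two changes together might look like the following; the `consul_kv` variable, task name, and module path shown here are illustrative, and the later sections describe each supported input variable in detail:

```hcl
# (1) In the module's variables.tf: declare the optional input variable
variable "consul_kv" {
  description = "Keys and values of the Consul KV pairs monitored by Consul-Terraform-Sync"
  type        = map(string)
}

# (2) In the Consul-Terraform-Sync configuration: define the source input for the task
task {
  name   = "kv-input-task"
  source = "path/to/module"

  condition "schedule" {
    cron = "* * * * Mon"
  }

  source_input "consul-kv" {
    path = "my-key"
  }
}
```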
### Source Input
A source input allows a task to satisfy the input requirements defined by the Terraform Module's [input variables](https://www.terraform.io/docs/language/values/variables.html), and is configured alongside a task's condition. Both the source input and condition define objects to be monitored, but for differing reasons.
The condition defines monitored objects with criteria. When these criteria are satisfied, Consul-Terraform-Sync triggers the task.
The source input, however, defines monitored objects with the intent of providing values or metadata about these objects to the Terraform Module. The source input and condition objects can be the same, such as when `task.services` is provided without a source input block or condition block, but they can also be defined using separate blocks. In this way, the source input does not need to be tied to the provided condition in order to satisfy the Terraform Module.
There are a few ways that a source input can be defined:
- [**`services` list**](/docs/nia/configuration#services) - The list of services to act as a source input.
- **`source_includes_var` condition field** - If the condition supports this field and it is set to true, then the condition's objects will be used as a source input. For example, if a module supports the [`catalog_services` input variable](#catalog-services-variable), then this field can be set to true in the [catalog-services condition](/docs/nia/tasks#catalog-services-condition).
- [**`source_input` block**](/docs/nia/configuration#source-input) - This block specifically defines a source input.
The multiple ways of defining a source input add configuration flexibility and allow optional additional input variables to be supported by Consul-Terraform-Sync alongside the `services` input variable.
These optional input variables include:
- [**`catalog_services` variable**](#catalog-services-variable)
- [**`consul_kv` variable**](#consul-kv-variable)
#### Services Source Input
Tasks configured with a services source input monitor for changes to services. Monitoring is either performed on a configured list of services or on any services matching a provided regex.
Sample rendered services input:
<CodeBlockConfig filename="terraform.tfvars">
```hcl
services = {
"web.test-server.dc1" = {
id = "web"
name = "web"
kind = ""
address = "127.0.0.1"
port = 80
meta = {}
tags = ["example"]
namespace = ""
status = "passing"
node = "pm8902"
node_id = "307625d3-a1cf-9e85-ff81-12017ca4d848"
node_address = "127.0.0.1"
node_datacenter = "dc1"
node_tagged_addresses = {
lan = "127.0.0.1"
lan_ipv4 = "127.0.0.1"
wan = "127.0.0.1"
wan_ipv4 = "127.0.0.1"
}
node_meta = {
consul-network-segment = ""
}
},
}
```
</CodeBlockConfig>
In order to configure a task with the services source input, the list of services that will be used for the input must be configured in one of the following ways:
- the task's [`services`](/docs/nia/configuration#services)
- a [`condition "services"` block](/docs/nia/configuration#services-condition) configured with `regexp` and `source_includes_var` set to true
- a [`source_input "services"` block](/docs/nia/configuration#services-source-input) configured with `regexp`
The services source input operates by monitoring the [Health List Nodes For Service API](/api-docs/health#list-nodes-for-service) and provides the latest service information to the input variables. A complete list of service information that would be provided to the module is expanded below:
| Attribute | Description |
| ----------------------- | ------------------------------------------------------------------------------------------------- |
| `id` | A unique Consul ID for this service. The service id is unique per Consul agent. |
| `name` | The logical name of the service. Many service instances may share the same logical service name. |
| `address` | IP address of the service host -- if empty, node address should be used. |
| `port` | Port number of the service |
| `meta` | List of user-defined metadata key/value pairs for the service |
| `tags` | List of tags for the service |
| `namespace` | Consul Enterprise namespace of the service instance |
| `status` | Representative status for the service instance based on an aggregate of the list of health checks |
| `node` | Name of the Consul node on which the service is registered |
| `node_id` | ID of the node on which the service is registered. |
| `node_address` | The IP address of the Consul node on which the service is registered. |
| `node_datacenter` | Data center of the Consul node on which the service is registered. |
| `node_tagged_addresses` | List of explicit LAN and WAN IP addresses for the agent |
| `node_meta` | List of user-defined metadata key/value pairs for the node |
Below is an example configuration for a task that will execute on a schedule and provide information about the services matching the `regexp` parameter to the task's module. Note that because `regexp` is set, `task.services` is omitted, and because a schedule is being used to trigger task execution, a `condition "services"` block cannot be used.
```hcl
task {
name = "services_condition_task"
description = "execute on changes to services whose name starts with web"
providers = ["my-provider"]
source = "path/to/services-condition-module"
condition "schedule" {
cron = "* * * * Mon"
}
source_input "services" {
regexp = "^web.*"
}
}
```
#### Consul KV Source Input
Tasks configured with a Consul KV source input monitor Consul KV for changes to KV pairs that satisfy the provided configuration. The Consul KV source input operates by monitoring the [Consul KV API](/api-docs/kv#read-key) and provides these key values to the task's module.
Sample rendered Consul KV input:
<CodeBlockConfig filename="terraform.tfvars">
```hcl
consul_kv = {
"my-key" = "some value"
}
```
</CodeBlockConfig>
To configure a task with the Consul KV source input, the KV pairs that will be used for the input must be configured in one of the following ways:
- a [`condition "consul-kv"` block](/docs/nia/configuration#consul-kv-condition) configured with the `source_includes_var` set to true.
- a [`source_input "consul-kv"` block](/docs/nia/configuration#consul-kv-source-input).
Below is an example similar to the one provided in the [Consul KV Condition](/docs/nia/tasks#consul-kv-condition) section. The difference is that instead of triggering on a change to Consul KV, this task executes on a schedule. Once execution is triggered, Consul KV information is provided to the task's module.
```hcl
task {
name = "consul_kv_schedule_task"
description = "executes on Monday monitoring Consul KV"
providers = ["my-provider"]
services = ["web-api"]
source = "path/to/consul-kv-module"
condition "schedule" {
cron = "* * * * Mon"
}
source_input "consul-kv" {
path = "my-key"
recurse = true
datacenter = "dc1"
namespace = "default"
}
}
```
#### Catalog Services Source Input
Tasks configured with a Catalog Services source input monitor for service and tag information provided by the [Catalog List Services API](/api-docs/catalog#list-services). The source input is a map of service names to a list of tags.
Sample rendered catalog-services input:
<CodeBlockConfig filename="terraform.tfvars">
```hcl
catalog_services = {
"api" = ["prod", "staging"]
"consul" = []
"web" = ["blue", "green"]
}
```
</CodeBlockConfig>
To configure a task with the Catalog Services source input, the catalog services that will be used for the input must be configured in the following way:
- a [`condition "catalog-services"` block](/docs/nia/configuration#consul-kv-condition) configured with `source_includes_var` set to true.
-> **Note:** Currently there is no support for a `source_input "catalog-services"` block.
Example of a catalog-services condition which supports source input through `source_includes_var`:
```hcl
task {
name = "catalog_services_condition_task"
description = "execute on registration/deregistration of services"
providers = ["my-provider"]
services = ["web-api"]
source = "path/to/catalog-services-module"
condition "catalog-services" {
datacenter = "dc1"
namespace = "default"
regexp = "web.*"
source_includes_var = true
node_meta {
key = "value"
}
}
}
```
## How to Create a Compatible Terraform Module
@ -92,6 +286,8 @@ Keys of the `services` map are unique identifiers of the service across Consul a
Terraform variables when passed as module arguments can be [lossy for object types](https://www.terraform.io/docs/configuration/types.html#conversion-of-complex-types). This allows Consul-Terraform-Sync to declare the full variable with every object attribute in the generated root module, and pass the variable to a child module that contains a subset of these attributes for its variable declaration. Modules compatible with Consul-Terraform-Sync may simplify the `var.services` declaration within the module by omitting unused attributes. For example, the following services variable has 4 attributes with the rest omitted.
<CodeBlockConfig filename="variables.tf">
```hcl
variable "services" {
description = "Consul services monitored by Consul-Terraform-Sync"
@ -107,17 +303,23 @@ variable "services" {
}
```
</CodeBlockConfig>
### Catalog Services Variable
If you are creating a module for a [catalog-services condition](/docs/nia/tasks#catalog-services-condition), then you have the option to add the `catalog_services` variable, which contains service registration and tag information. If your module would benefit from consuming this information, you can copy the `catalog_services` variable declaration to your `variables.tf` file in addition to the other variables.
<CodeBlockConfig filename="variables.tf">
```hcl
variable "catalog_services" {
description = "Consul catalog service names and tags monitored by Consul-Terraform-Sync"
type = map(list(string))
type = map(list(string))
}
```
</CodeBlockConfig>
The keys of the `catalog_services` map are the names of the services that are registered with Consul at the given datacenter. The value for each service name is a list of all known tags for that service.
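As an illustration of consuming this map inside a module (the tag name here is hypothetical), a module could filter the monitored services by tag:

```hcl
locals {
  # Names of services that carry the "prod" tag (illustrative)
  prod_services = [
    for name, tags in var.catalog_services : name
    if contains(tags, "prod")
  ]
}
```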
We recommend that if you make a module with a catalog-services condition, you document this in the README. This way, users that want to configure a task with your module will know to configure a catalog-services [condition](/docs/nia/configuration#condition) block.
@ -126,17 +328,19 @@ Similarly, if you include the `catalog_services` variable in your module, we rec
### Consul KV Variable
-> **Beta:** This feature is currently only available in Consul-Terraform-Sync v0.4.0-beta.
If you are creating a module for a [consul-kv condition](/docs/nia/tasks#consul-kv-condition), then you have the option to add the `consul_kv` variable, which contains a map of the keys and values for the Consul KV pairs. If your module would benefit from consuming this information, you can copy the `consul_kv` variable declaration to your `variables.tf` file in addition to the other variables.
<CodeBlockConfig filename="variables.tf">
```hcl
variable "consul_kv" {
description = "Keys and values of the Consul KV pairs monitored by Consul-Terraform-Sync"
type = map(string)
type = map(string)
}
```
</CodeBlockConfig>
If your module contains the `consul_kv` variable, we recommend documenting the usage in the README file so that users know to set the [`source_includes_var`](/docs/nia/configuration#source_includes_var-1) configuration to `true` in the `consul-kv` condition. Setting the field to `true` instructs Consul-Terraform-Sync to declare the `consul_kv` variable in the generated root module and pass the variable to a child module. Therefore, if this field is configured inconsistently, Consul-Terraform-Sync will error and exit.
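As an illustration (the key name and default value are hypothetical), a module might read a single monitored key out of the map:

```hcl
locals {
  # Fall back to a default when the key has not been written yet (illustrative)
  firewall_mode = lookup(var.consul_kv, "my-key", "deny-all")
}
```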
### Module Input Variables
@ -149,7 +353,7 @@ Network infrastructure differs vastly across teams and organizations, and the au
4. Set reasonable default values for variables that are optional, or omit default values for variables that are required module arguments.
5. Set the [sensitive argument](https://www.terraform.io/docs/language/values/variables.html#suppressing-values-in-cli-output) for variables that contain secret or sensitive values. When set, Terraform will redact the value from output when Terraform commands are run.
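For example, guideline 5 might look like the following for a hypothetical `api_token` variable:

```hcl
variable "api_token" {
  description = "Token the module uses to authenticate with the network device API"
  type        = string
  sensitive   = true
}
```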
Terraform is an explicit configuration language and requires variables to be declared, typed, and passed explicitly through as module arguments. Consul-Terraform-Sync abstracts this by creating intermediate variables at the root level from values intended for the module. These values are configured by practitioners within the [`task` block](/docs/nia/configuration#variable_files). Value assignments are parsed to interpolate the corresponding variable declaration and are written to the appropriate Terraform files. A few assumptions are made for the intermediate variables: the variables users provide Consul-Terraform-Sync are declared and supported by the module, matching name and type.
Terraform is an explicit configuration language and requires variables to be declared, typed, and passed explicitly through as module arguments. Consul-Terraform-Sync abstracts this by creating intermediate variables at the root level from the source input. These values are configured by practitioners within the [`task` block](/docs/nia/configuration#variable_files). Value assignments are parsed to interpolate the corresponding variable declaration and are written to the appropriate Terraform files. A few assumptions are made for the intermediate variables: the variables users provide to Consul-Terraform-Sync must be declared and supported by the module, with matching names and types.
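As a sketch of how practitioners supply these values (the task name, variable name, and file path are illustrative), a task can point at a variable definitions file whose assignments Consul-Terraform-Sync parses into the generated intermediate variables:

```hcl
task {
  name     = "dns-automation"
  source   = "org/dns/module"
  services = ["web"]

  # dns.tfvars contains assignments such as: dns_zone = "example.com"
  variable_files = ["/opt/cts/dns.tfvars"]
}
```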
### Module Guidelines


@ -6,6 +6,7 @@
"dependencies": {
"@hashicorp/mktg-global-styles": "^4.0.0",
"@hashicorp/mktg-logos": "^1.2.0",
"@hashicorp/platform-analytics": "^0.2.0",
"@hashicorp/platform-code-highlighting": "^0.1.2",
"@hashicorp/platform-runtime-error-monitoring": "^0.1.0",
"@hashicorp/platform-util": "^0.1.0",


@ -1,6 +1,7 @@
import './style.css'
import '@hashicorp/platform-util/nprogress/style.css'
import useFathomAnalytics from '@hashicorp/platform-analytics'
import Router from 'next/router'
import Head from 'next/head'
import NProgress from '@hashicorp/platform-util/nprogress'
@ -21,7 +22,9 @@ const { ConsentManager, openConsentManager } = createConsentManager({
})
export default function App({ Component, pageProps }) {
useFathomAnalytics()
useAnchorLinkAnalytics()
return (
<ErrorBoundary FallbackComponent={Error}>
<HashiHead


@ -1083,6 +1083,11 @@ module.exports = [
destination: '/docs/upgrading/compatibility',
permanent: true,
},
{
source: '/docs/commands/acl/role',
destination: '/commands/acl/role',
permanent: true,
},
{
source: '/docs/commands/acl/role/create',
destination: '/commands/acl/role/create',