Allow consul version/consul download url to be inputted via Terraform (#9267)

This commit is contained in:
s-christoff 2020-12-11 13:11:14 -06:00 committed by GitHub
parent cb3dbc92f9
commit 348766166e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 213 additions and 107 deletions

View File

@ -9,7 +9,7 @@ To build the Consul AMI:
3. Configure your AWS credentials using one of the [options supported by the AWS 3. Configure your AWS credentials using one of the [options supported by the AWS
SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to
set the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_DEFAULT_REGION` environment variables. set the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_DEFAULT_REGION` environment variables.
4. Update the `variables` section of the `consul.json` Packer template to configure the AWS region, Consul version, and datadog api key you would like to use. Feel free to reference this article to find your [datadog API key](https://docs.datadoghq.com/account_management/api-app-keys/#api-keys). 4. Update the `variables` section of the `consul.json` Packer template to configure the AWS region and datadog api key you would like to use. Feel free to reference this article to find your [datadog API key](https://docs.datadoghq.com/account_management/api-app-keys/#api-keys).
5. For additional customization you can add [tags](https://docs.datadoghq.com/getting_started/tagging/assigning_tags/?tab=noncontainerizedenvironments) within the `scripts/datadog.yaml` file. One example of a tag could be `"consul_version" : "consulent_175"`. These tags are searchable through the datadog dashboard. Another form of customization is changing the datacenter tag within `scripts/telemetry.json`, however it is defaulted to `us-east-1`. 5. For additional customization you can add [tags](https://docs.datadoghq.com/getting_started/tagging/assigning_tags/?tab=noncontainerizedenvironments) within the `scripts/datadog.yaml` file. One example of a tag could be `"consul_version" : "consulent_175"`. These tags are searchable through the datadog dashboard. Another form of customization is changing the datacenter tag within `scripts/telemetry.json`, however it is defaulted to `us-east-1`.
6. Run `packer build consul.json`. 6. Run `packer build consul.json`.

View File

@ -35,16 +35,6 @@
"source": "{{template_dir}}/scripts", "source": "{{template_dir}}/scripts",
"destination": "/home/ubuntu", "destination": "/home/ubuntu",
"pause_before": "30s" "pause_before": "30s"
},{
"type": "shell",
"inline": [
"if test -n \"{{user `download_url`}}\"; then",
"/home/ubuntu/scripts/install-consul --download-url {{user `download_url`}};",
"else",
"/home/ubuntu/scripts/install-consul --version {{user `consul_version`}};",
"fi"
],
"pause_before": "30s"
},{ },{
"type": "shell", "type": "shell",
"inline": [ "inline": [

View File

@ -5,8 +5,8 @@ mv /home/ubuntu/scripts/conf.yaml /etc/datadog-agent/conf.d/consul.d/
mv /home/ubuntu/scripts/datadog.yaml /etc/datadog-agent/ mv /home/ubuntu/scripts/datadog.yaml /etc/datadog-agent/
##Move Consul Config that hooks up to datadog ##Move Consul Config that hooks up to datadog
mkdir -p /opt/consul/config
mv /home/ubuntu/scripts/telemetry.json /opt/consul/config/ mv /home/ubuntu/scripts/telemetry.json /opt/consul/config/
chown consul:consul /opt/consul/config/telemetry.json
## Let everyone own their stuff now ## Let everyone own their stuff now
chown dd-agent:dd-agent /etc/datadog-agent/conf.d/consul.d/conf.yaml chown dd-agent:dd-agent /etc/datadog-agent/conf.d/consul.d/conf.yaml

View File

@ -5,8 +5,8 @@
}, },
"builders": [{ "builders": [{
"name": "ubuntu18-ami", "name": "ubuntu18-ami",
"ami_name": "loadtest-ubuntu-{{isotime | clean_resource_name}}-{{uuid}}", "ami_name": "consul-test-{{isotime | clean_resource_name}}-{{uuid}}",
"ami_description": "An Ubuntu 18.04 AMI that has k6 installed.", "ami_description": "An Ubuntu 18.04 AMI that has hey installed.",
"instance_type": "t2.micro", "instance_type": "t2.micro",
"region": "{{user `aws_region`}}", "region": "{{user `aws_region`}}",
"associate_public_ip_address": true, "associate_public_ip_address": true,

View File

@ -12,6 +12,4 @@ sudo apt-get update
sudo apt-get install k6 sudo apt-get install k6
# move service file # move service file
mv /home/ubuntu/scripts/loadtest.service /etc/systemd/system/loadtest.service chmod 755 /home/ubuntu/scripts/loadtest.js
chmod 755 /home/ubuntu/scripts/puts_script.js
chmod 755 /home/ubuntu/scripts/run-k6.sh

View File

@ -0,0 +1,62 @@
import http from 'k6/http';
// BUGFIX: check() and fail() are k6 helpers and must be imported explicitly.
import { check, fail } from 'k6';
import { uuidv4 } from "https://jslib.k6.io/k6-utils/1.0.0/index.js";

// Payload templates, loaded once per VU at init time.
const serviceTemplate = JSON.parse(open('service.json'));
// BUGFIX: renamed from `check` — the original name shadowed k6's check()
// helper, so every `check(res, ...)` call below was a TypeError.
const checkTemplate = JSON.parse(open('service-check.json'));

// One load-test iteration: write a KV pair, register a service under a
// unique ID, then attach a (deliberately failing) health check so the
// service auto-deregisters later. Fails the iteration on any non-200.
export default function () {
  const key = uuidv4();
  const ipaddress = `http://${__ENV.LB_ENDPOINT}:8500`;

  // --- Put a valid K/V pair ---
  const value = { data: uuidv4() };
  // BUGFIX: each request gets its own binding; the original redeclared
  // `let res` three times in one scope, which is a SyntaxError.
  const kvRes = http.put(`${ipaddress}/v1/kv/${key}`, JSON.stringify(value));
  if (!check(kvRes, { 'kv status code MUST be 200': (r) => r.status === 200 })) {
    fail('kv status code was *not* 200');
  }

  // --- Register a service keyed by the random UUID ---
  serviceTemplate.ID = key;
  serviceTemplate.Name = key;
  const svcRes = http.put(
    `${ipaddress}/v1/agent/service/register`,
    JSON.stringify(serviceTemplate),
  );
  if (!check(svcRes, { 'register service status code MUST be 200': (r) => r.status === 200 })) {
    fail('register service status code was *not* 200');
  }

  // --- Register the health check against that service ---
  checkTemplate.ServiceID = key;
  const chkRes = http.put(
    `${ipaddress}/v1/agent/check/register`,
    JSON.stringify(checkTemplate),
  );
  if (!check(chkRes, { 'register check status code MUST be 200': (r) => r.status === 200 })) {
    fail('register check status code was *not* 200');
  }
}

export let options = {
  // 100 concurrent virtual users for 15 minutes.
  vus: 100,
  duration: "15m",
  // 95% of requests must complete below 0.280s
  thresholds: { http_req_duration: ["p(95)<280"] },
};

View File

@ -1,7 +0,0 @@
[Unit]
Description=Execute run-k6.
[Service]
Type=simple
ExecStart=/bin/bash -c 'exec /home/ubuntu/scripts/run-k6.sh'
[Install]
WantedBy=multi-user.target

View File

@ -1,25 +0,0 @@
import http from 'k6/http';
import { uuidv4 } from "https://jslib.k6.io/k6-utils/1.0.0/index.js";

// Each iteration PUTs a random value under a random key in Consul's KV
// store (via the load balancer) and logs the parsed response body.
export default function () {
  const base = `http://${__ENV.LB_ENDPOINT}:8500`;
  const key = uuidv4();
  const payload = JSON.stringify({ data: uuidv4() });
  const res = http.put(`${base}/v1/kv/${key}`, payload);
  console.log(JSON.parse(res.body));
}

export let options = {
  // 100 concurrent virtual users for 15 minutes.
  vus: 100,
  duration: "15m",
  // 95% of requests must complete below 0.280s
  thresholds: { http_req_duration: ["p(95)<280"] },
};

View File

@ -1,3 +0,0 @@
#!/bin/bash
# Run the k6 load-test script that PUTs random keys into Consul's KV store.
k6 run /home/ubuntu/scripts/puts_script.js

View File

@ -0,0 +1,12 @@
{
"ID": "api",
"Name": "Invalid Check so Services Will Automagically Deregister",
"HTTP": "https://example.com:850040220",
"tls_skip_verify": true,
"Method": "POST",
"Header": {"Content-Type": ["application/json"]},
"Body": "{\"method\":\"health\"}",
"Interval": "10s",
"Timeout": "5s",
"DeregisterCriticalServiceAfter": "2m"
}

View File

@ -0,0 +1,8 @@
{
"Tags": ["primary", "v1"],
"Address": "127.0.0.1",
"Port": 8000,
"Meta": {
"redis_version": "4.0"
}
}

View File

@ -22,6 +22,9 @@ ami_owners = ["******"]
## This is found from building the image in packer/consul-ami ## This is found from building the image in packer/consul-ami
consul_ami_id = "ami-016d80ff5472346f0" consul_ami_id = "ami-016d80ff5472346f0"
``` ```
If `consul_version` or `consul_download_url` is not set within the Terraform variables it will default to utilizing Consul 1.9.0
4. AWS Variables are set off of environment variables. Make sure to export necessary variables [shown here](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#environment-variables). 4. AWS Variables are set off of environment variables. Make sure to export necessary variables [shown here](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#environment-variables).
5. Run `terraform plan -var-file=vars.tfvars`, and then `terraform apply -var-file=vars.tfvars` when ready. 5. Run `terraform plan -var-file=vars.tfvars`, and then `terraform apply -var-file=vars.tfvars` when ready.
6. Upon completion k6 should run and push metrics to desired Datadog dashboard. 6. Upon completion k6 should run and push metrics to desired Datadog dashboard.

View File

@ -23,18 +23,46 @@ data "aws_ami" "consul" {
# Deploy consul cluster # Deploy consul cluster
# --------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------
module "consul" { module "consul_servers" {
source = "hashicorp/consul/aws" source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.8.0"
version = "0.7.9"
depends_on = [module.vpc.vpc_id] cluster_name = "${var.cluster_name}-server"
ami_id = var.consul_ami_id cluster_size = var.num_servers
ssh_key_name = module.keys.key_name instance_type = var.instance_type
vpc_id = module.vpc.vpc_id cluster_tag_value = var.cluster_name
cluster_name = var.cluster_name
num_clients = var.num_clients ami_id = var.consul_ami_id == null ? data.aws_ami.consul.id : var.consul_ami_id
num_servers = var.num_servers user_data = data.template_file.user_data_server.rendered
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.public_subnets
allowed_ssh_cidr_blocks = ["0.0.0.0/0"]
allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
ssh_key_name = module.keys.key_name
} }
module "consul_clients" {
source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.8.0"
cluster_name = "${var.cluster_name}-client"
cluster_size = var.num_clients
instance_type = var.instance_type
cluster_tag_value = var.cluster_name
ami_id = var.consul_ami_id == null ? data.aws_ami.consul.id : var.consul_ami_id
user_data = data.template_file.user_data_client.rendered
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.public_subnets
allowed_ssh_cidr_blocks = ["0.0.0.0/0"]
allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
ssh_key_name = module.keys.key_name
}
# --------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------
# This script will configure and start Consul agents # This script will configure and start Consul agents
# --------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------
@ -43,8 +71,10 @@ data "template_file" "user_data_server" {
template = file("${path.module}/user-data-server.sh") template = file("${path.module}/user-data-server.sh")
vars = { vars = {
cluster_tag_key = var.cluster_tag_key consul_version = var.consul_version
cluster_tag_value = var.cluster_name consul_download_url = var.consul_download_url
cluster_tag_key = var.cluster_tag_key
cluster_tag_value = var.cluster_name
} }
} }
@ -52,8 +82,10 @@ data "template_file" "user_data_client" {
template = file("${path.module}/user-data-client.sh") template = file("${path.module}/user-data-client.sh")
vars = { vars = {
cluster_tag_key = var.cluster_tag_key consul_version = var.consul_version
cluster_tag_value = var.cluster_name consul_download_url = var.consul_download_url
cluster_tag_key = var.cluster_tag_key
cluster_tag_value = var.cluster_name
} }
} }
@ -65,13 +97,13 @@ module "alb" {
source = "terraform-aws-modules/alb/aws" source = "terraform-aws-modules/alb/aws"
version = "~> 5.0" version = "~> 5.0"
name = "${var.cluster_name}-${local.random_name}-alb" name = "${var.cluster_name}-alb"
load_balancer_type = "application" load_balancer_type = "application"
vpc_id = module.vpc.vpc_id vpc_id = module.vpc.vpc_id
subnets = module.vpc.public_subnets subnets = module.vpc.public_subnets
security_groups = [module.consul.security_group_id_clients] security_groups = [module.consul_clients.security_group_id]
internal = true internal = true
target_groups = [ target_groups = [
@ -95,6 +127,6 @@ module "alb" {
# Attach ALB to Consul clients # Attach ALB to Consul clients
resource "aws_autoscaling_attachment" "asg_attachment_bar" { resource "aws_autoscaling_attachment" "asg_attachment_bar" {
autoscaling_group_name = module.consul.asg_name_clients autoscaling_group_name = module.consul_clients.asg_name
alb_target_group_arn = module.alb.target_group_arns[0] alb_target_group_arn = module.alb.target_group_arns[0]
} }

View File

@ -1,5 +0,0 @@
#!/bin/bash
echo "LB_ENDPOINT=${lb_endpoint}" >> /etc/environment
systemctl start loadtest

View File

@ -1,3 +1,24 @@
data "aws_ami" "test" {
most_recent = true
owners = var.ami_owners
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "is-public"
values = ["false"]
}
filter {
name = "name"
values = ["consul-test-*"]
}
}
# --------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------
# Start up test servers to run tests from # Start up test servers to run tests from
# --------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------
@ -8,7 +29,7 @@ resource "aws_security_group" "test-servers" {
ingress { ingress {
from_port = 8500 from_port = 8500
to_port = 8500 to_port = 8500
security_groups = [module.consul.security_group_id_clients] security_groups = [module.consul_clients.security_group_id]
protocol = "6" protocol = "6"
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
@ -26,37 +47,24 @@ resource "aws_security_group" "test-servers" {
} }
} }
resource "aws_launch_configuration" "test-servers" { resource "aws_instance" "test-server" {
name_prefix = "${var.cluster_name}-${local.random_name}-test-" ami = var.test_server_ami == null ? data.aws_ami.test.id : var.test_server_ami
image_id = var.test_server_ami instance_type = var.test_instance_type
instance_type = var.test_instance_type key_name = module.keys.key_name
key_name = module.keys.key_name vpc_security_group_ids = toset([aws_security_group.test-servers.id])
security_groups = [aws_security_group.test-servers.id]
associate_public_ip_address = var.test_public_ip associate_public_ip_address = var.test_public_ip
lifecycle { subnet_id = (module.vpc.public_subnets)[0]
create_before_destroy = true provisioner "remote-exec" {
} inline = [
user_data = templatefile( "export LB_ENDPOINT=${module.alb.this_lb_dns_name}",
"./start-k6.sh", "k6 run /home/ubuntu/scripts/loadtest.js"
{ ]
lb_endpoint = module.alb.this_lb_dns_name connection {
type = "ssh"
user = "ubuntu"
timeout = "1m"
private_key = module.keys.private_key_pem
host = aws_instance.test-server.public_ip
} }
)
}
resource "aws_autoscaling_group" "test-servers" {
name = aws_launch_configuration.test-servers.name
launch_configuration = aws_launch_configuration.test-servers.id
min_size = 2
max_size = 5
desired_capacity = 2
wait_for_capacity_timeout = "480s"
health_check_grace_period = 15
health_check_type = "EC2"
vpc_zone_identifier = module.vpc.public_subnets
lifecycle {
create_before_destroy = true
} }
} }

View File

@ -6,11 +6,20 @@
set -e set -e
# Send the log output from this script to user-data.log, syslog, and the console # Send the log output from this script to user-data.log, syslog, and the console
# From: https://alestic.com/2010/12/ec2-user-data-output/ # From: https://alestic.com/2010/12/ec2-user-data-output/
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
# Install Consul
if [[ -n "${consul_download_url}" ]]; then
/home/ubuntu/scripts/install-consul --download-url "${consul_download_url}"
else
/home/ubuntu/scripts/install-consul --version "${consul_version}"
fi
# Update User:Group on this file really quick
chown consul:consul /opt/consul/config/telemetry.json
# These variables are passed in via Terraform template interpolation # These variables are passed in via Terraform template interpolation
/opt/consul/bin/run-consul --client --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" /opt/consul/bin/run-consul --client --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}"
# You could add commands to boot your other apps here

View File

@ -10,5 +10,15 @@ set -e
# From: https://alestic.com/2010/12/ec2-user-data-output/ # From: https://alestic.com/2010/12/ec2-user-data-output/
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
# Install Consul
if [[ -n "${consul_download_url}" ]]; then
/home/ubuntu/scripts/install-consul --download-url "${consul_download_url}"
else
/home/ubuntu/scripts/install-consul --version "${consul_version}"
fi
# Update User:Group on this file really quick
chown consul:consul /opt/consul/config/telemetry.json
# These variables are passed in via Terraform template interpolation # These variables are passed in via Terraform template interpolation
/opt/consul/bin/run-consul --server --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" /opt/consul/bin/run-consul --server --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}"

View File

@ -90,6 +90,7 @@ variable "private_subnet_cidrs" {
variable "test_server_ami" { variable "test_server_ami" {
type = string type = string
description = "The AMI ID from the Packer generated image" description = "The AMI ID from the Packer generated image"
default = null
} }
variable "test_instance_type" { variable "test_instance_type" {
@ -111,3 +112,16 @@ variable "ami_owners" {
type = list(string) type = list(string)
description = "The account owner number which the desired AMI is in" description = "The account owner number which the desired AMI is in"
} }
variable "consul_download_url" {
type = string
description = "URL to download the Consul binary from"
default = ""
}
variable "consul_version" {
type = string
description = "Version of the Consul binary to install"
default = "1.9.0"
}