upgrade to Terraform 0.12

Signed-off-by: Jakub Sokołowski <jakub@status.im>
Jakub Sokołowski 2019-08-02 19:13:00 -04:00
parent d68e9894fd
commit b9865f5881
GPG Key ID: 4EF064D0E6D63020
7 changed files with 477 additions and 168 deletions


@@ -9,31 +9,29 @@ endif
PLUGIN_DIR = ~/.terraform.d/plugins
PROVIDER_NAME = terraform-provider-ansible
PROVIDER_VERSION = v0.0.4
PROVIDER_VERSION = v1.0.3
PROVIDER_ARCHIVE = $(PROVIDER_NAME)-$(ARCH).zip
PROVIDER_URL = https://github.com/nbering/terraform-provider-ansible/releases/download/$(PROVIDER_VERSION)/$(PROVIDER_ARCHIVE)
PROVISIONER_NAME = terraform-provisioner-ansible
PROVISIONER_VERSION = v2.0.0
PROVISIONER_VERSION = v2.3.0
PROVISIONER_ARCHIVE = $(PROVISIONER_NAME)-$(subst _,-,$(ARCH))_$(PROVISIONER_VERSION)
PROVISIONER_URL = https://github.com/radekg/terraform-provisioner-ansible/releases/download/$(PROVISIONER_VERSION)/$(PROVISIONER_ARCHIVE)
all: deps secrets init-terraform
all: requirements install-provider install-provisioner secrets init-terraform
@echo "Success!"
deps: requirements plugins
plugins: install-provider install-provisioner
requirements:
ansible-galaxy install --ignore-errors --force -r ansible/requirements.yml
install-unzip:
ifeq (, $(shell which unzip)) \
$(error "No unzip in PATH, consider doing apt install unzip") \
endif
check-unzip:
ifeq (, $(shell which unzip))
$(error "No unzip in PATH, consider doing apt install unzip")
endif
install-provider:
install-provider: check-unzip
if [ ! -e $(PLUGIN_DIR)/$(ARCH)/$(PROVIDER_NAME)_$(PROVIDER_VERSION) ]; then \
mkdir -p $(PLUGIN_DIR); \
wget $(PROVIDER_URL) -P $(PLUGIN_DIR); \
@@ -68,4 +66,4 @@ alicloud_secret_key = \"$(shell pass cloud/Alibaba/secret-key)\"\n\
" > terraform.tfvars
cleanup:
rm -rf $(PLUGIN_DIR)/$(ARCHIVE)
rm -r $(PLUGIN_DIR)/$(ARCHIVE)


@@ -1,13 +1,17 @@
# NOTE: This file is generated by terraform.py
# For emergency use when Consul fails
[all]
node-01.ac-cn-hongkong-c.swarm.test hostname=node-01.ac-cn-hongkong-c.swarm.test ansible_host=47.75.109.9 env=swarm stage=test data_center=ac-cn-hongkong-c region=cn-hongkong-c dns_entry=node-01.ac-cn-hongkong-c.swarm.test.statusim.net
node-01.do-ams3.swarm.test hostname=node-01.do-ams3.swarm.test ansible_host=188.166.203.183 env=swarm stage=test data_center=do-ams3 region=ams3 dns_entry=node-01.do-ams3.swarm.test.statusim.net
node-01.gc-us-central1-a.swarm.test hostname=node-01.gc-us-central1-a.swarm.test ansible_host=35.224.182.65 env=swarm stage=test data_center=gc-us-central1-a region=us-central1-a dns_entry=node-01.gc-us-central1-a.swarm.test.statusim.net
node-01.ac-cn-hongkong-c.swarm.test hostname=node-01.ac-cn-hongkong-c.swarm.test ansible_host=47.75.109.9 env=swarm stage=test data_center=ac-cn-hongkong-c region=cn-hongkong-c dns_entry=node-01.ac-cn-hongkong-c.swarm.test.statusim.net
[all]
[ac-cn-hongkong-c]
node-01.ac-cn-hongkong-c.swarm.test
[do-ams3]
node-01.do-ams3.swarm.test
[gc-us-central1-a]
node-01.gc-us-central1-a.swarm.test
[swarm]
@@ -15,12 +19,3 @@ node-01.ac-cn-hongkong-c.swarm.test
node-01.do-ams3.swarm.test
node-01.gc-us-central1-a.swarm.test
[gc-us-central1-a]
node-01.gc-us-central1-a.swarm.test
[do-ams3]
node-01.do-ams3.swarm.test
[ac-cn-hongkong-c]
node-01.ac-cn-hongkong-c.swarm.test


@@ -1,10 +1,53 @@
#! /usr/bin/env python2
#!/usr/bin/env python
# source: https://github.com/nbering/terraform-inventory
'''
Terraform Inventory Script
==========================
This inventory script generates dynamic inventory by reading Terraform state
contents. Servers and groups are defined inside the Terraform state using special
resources defined by the Terraform Provider for Ansible.
Configuration
=============
State is fetched using the "terraform state pull" subcommand. The behaviour of
this action can be configured using some environment variables.
Environment Variables:
......................
ANSIBLE_TF_BIN
Override the path to the Terraform command executable. This is useful if
you have multiple copies or versions installed and need to specify a
specific binary. The inventory script runs the `terraform state pull`
command to fetch the Terraform state, so that remote state will be
fetched seamlessly regardless of the backend configuration.
ANSIBLE_TF_DIR
Set the working directory for the `terraform` command when the script
shells out to it. This is useful if you keep your terraform and ansible
configuration in separate directories. Defaults to using the current
working directory.
ANSIBLE_TF_WS_NAME
Sets the workspace for the `terraform` command when the script shells
out to it. Defaults to the `default` workspace - if you don't use
workspaces, this is the one you'll be using.
'''
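# Example invocation (editor's sketch, not part of this commit). With the
# variables documented above, Ansible can use this script directly as a
# dynamic inventory; the script path and playbook name are only illustrative:
#
#   ANSIBLE_TF_DIR=. ANSIBLE_TF_WS_NAME=test \
#       ansible-playbook -i terraform.py site.yml
#
# ANSIBLE_TF_BIN only needs to be set when `terraform` is not on PATH or a
# specific binary must be used.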
import sys
import json
import os
import re
import subprocess
import sys
import traceback
from subprocess import Popen, PIPE
TERRAFORM_DIR = os.environ.get('ANSIBLE_TF_DIR', os.getcwd())
TERRAFORM_ENV = os.path.join(TERRAFORM_DIR, '.terraform/environment')
TERRAFORM_PATH = os.environ.get('ANSIBLE_TF_BIN', 'terraform')
TERRAFORM_BPK = os.path.join(TERRAFORM_DIR, '.terraform/terraform.tfstate.backup')
def _tf_env():
# Figure out the currently selected TF workspace
@@ -14,145 +57,398 @@ def _tf_env():
except:
return 'default'
TERRAFORM_PATH = os.environ.get('ANSIBLE_TF_BIN', 'terraform')
TERRAFORM_DIR = os.environ.get('ANSIBLE_TF_DIR', os.getcwd())
TERRAFORM_BPK = os.path.join(TERRAFORM_DIR, '.terraform/terraform.tfstate.backup')
TERRAFORM_ENV = os.path.join(TERRAFORM_DIR, '.terraform/environment')
TERRAFORM_WS_NAME = os.environ.get('ANSIBLE_TF_WS_NAME', _tf_env())
ANSIBLE_BKP = os.path.join(TERRAFORM_DIR, 'ansible/inventory', _tf_env())
def _extract_dict(attrs, key):
out = {}
for k in attrs.keys():
match = re.match(r"^" + key + r"\.(.*)", k)
if not match or match.group(1) == "%":
continue
class TerraformState(object):
'''
TerraformState wraps the state content to provide some helpers for iterating
over resources.
'''
out[match.group(1)] = attrs[k]
return out
def __init__(self, state_json):
self.state_json = state_json
def _extract_list(attrs, key):
out = []
if "modules" in state_json:
# state format for pre-0.12
self.flat_attrs = True
else:
# state format for 0.12+
self.flat_attrs = False
length_key = key + ".#"
if length_key not in attrs.keys():
return []
def resources(self):
'''Generator method to iterate over resources in the state file.'''
if self.flat_attrs:
modules = self.state_json["modules"]
for module in modules:
for resource in module["resources"].values():
yield TerraformResource(resource, flat_attrs=True)
else:
resources = self.state_json["resources"]
for resource in resources:
for instance in resource["instances"]:
yield TerraformResource(instance, resource_type=resource["type"])
length = int(attrs[length_key])
if length < 1:
return []
for i in range(0, length):
out.append(attrs["{}.{}".format(key, i)])
return out
def _init_group(children=None, hosts=None, vars=None):
return {
"hosts": [] if hosts is None else hosts,
"vars": {} if vars is None else vars,
"children": [] if children is None else children
class TerraformResource(object):
'''
TerraformResource wraps individual resource content and provides some helper
methods for reading older-style dictionary and list values from attributes
defined as a single-level map.
'''
DEFAULT_PRIORITIES = {
'ansible_host': 50,
'ansible_group': 50,
'ansible_host_var': 60,
'ansible_group_var': 60
}
def _add_host(inventory, hostname, groups, host_vars):
inventory["_meta"]["hostvars"][hostname] = host_vars
for group in groups:
if group not in inventory.keys():
inventory[group] = _init_group(hosts=[hostname])
elif hostname not in inventory[group]:
inventory[group]["hosts"].append(hostname)
def __init__(self, source_json, flat_attrs=False, resource_type=None):
self.flat_attrs = flat_attrs
self._type = resource_type
self._priority = None
self.source_json = source_json
def _add_group(inventory, group_name, children, group_vars):
if group_name not in inventory.keys():
inventory[group_name] = _init_group(children=children, vars=group_vars)
else:
# Start out with support for only one "group" with a given name
# If there's a second group by the name, last in wins
inventory[group_name]["children"] = children
inventory[group_name]["vars"] = group_vars
def is_ansible(self):
'''Check if the resource is provided by the ansible provider.'''
return self.type().startswith("ansible_")
def _init_inventory():
return {
"all": _init_group(),
"_meta": {
"hostvars": {}
def priority(self):
'''Get the merge priority of the resource.'''
if self._priority is not None:
return self._priority
priority = 0
if self.read_int_attr("variable_priority") is not None:
priority = self.read_int_attr("variable_priority")
elif self.type() in TerraformResource.DEFAULT_PRIORITIES:
priority = TerraformResource.DEFAULT_PRIORITIES[self.type()]
self._priority = priority
return self._priority
def type(self):
'''Returns the Terraform resource type identifier.'''
if self._type:
return self._type
return self.source_json["type"]
def read_dict_attr(self, key):
'''
Read a dictionary attribute from the resource, handling old-style
Terraform state where maps are stored as multiple keys in the resource's
attributes.
'''
attrs = self._raw_attributes()
if self.flat_attrs:
out = {}
for k in attrs.keys():
match = re.match(r"^" + key + r"\.(.*)", k)
if not match or match.group(1) == "%":
continue
out[match.group(1)] = attrs[k]
return out
return attrs.get(key, {})
def read_list_attr(self, key):
'''
Read a list attribute from the resource, handling old-style Terraform
state where lists are stored as multiple keys in the resource's
attributes.
'''
attrs = self._raw_attributes()
if self.flat_attrs:
out = []
length_key = key + ".#"
if length_key not in attrs.keys():
return []
length = int(attrs[length_key])
if length < 1:
return []
for i in range(0, length):
out.append(attrs["{}.{}".format(key, i)])
return out
return attrs.get(key, None)
def read_int_attr(self, key):
'''
Read an attribute from state and convert it to an int.
'''
val = self.read_attr(key)
if val is not None:
val = int(val)
return val
def read_attr(self, key):
'''
Read an attribute from the underlying state content.
'''
return self._raw_attributes().get(key, None)
def _raw_attributes(self):
if self.flat_attrs:
return self.source_json["primary"]["attributes"]
return self.source_json["attributes"]
class AnsibleInventory(object):
'''
AnsibleInventory handles conversion from Terraform resource content to
Ansible inventory entities, and building of the final inventory json.
'''
def __init__(self):
self.groups = {}
self.hosts = {}
self.inner_json = {}
def add_host_resource(self, resource):
'''Upsert type action for host resources.'''
hostname = resource.read_attr("inventory_hostname")
if hostname in self.hosts:
host = self.hosts[hostname]
host.add_source(resource)
else:
host = AnsibleHost(hostname, source=resource)
self.hosts[hostname] = host
def add_group_resource(self, resource):
'''Upsert type action for group resources.'''
groupname = resource.read_attr("inventory_group_name")
if groupname in self.groups:
group = self.groups[groupname]
group.add_source(resource)
else:
group = AnsibleGroup(groupname, source=resource)
self.groups[groupname] = group
def update_groups(self, groupname, children=None, hosts=None, group_vars=None):
'''Upsert-type update of a group's children, hosts, or variables.'''
if groupname in self.groups:
group = self.groups[groupname]
group.update(children=children, hosts=hosts, group_vars=group_vars)
else:
group = AnsibleGroup(groupname)
group.update(children, hosts, group_vars)
self.groups[groupname] = group
def add_resource(self, resource):
'''
Process a Terraform resource, passing to the correct handler function
by type.
'''
if resource.type().startswith("ansible_host"):
self.add_host_resource(resource)
elif resource.type().startswith("ansible_group"):
self.add_group_resource(resource)
def to_dict(self):
'''
Generate the final Ansible inventory structure to be serialized into JSON
for consumption by Ansible proper.
'''
out = {
"_meta": {
"hostvars": {}
}
}
}
def _handle_host(attrs, inventory):
host_vars = _extract_dict(attrs, "vars")
groups = _extract_list(attrs, "groups")
hostname = attrs["inventory_hostname"]
for hostname, host in self.hosts.items():
host.build()
for group in host.groups:
self.update_groups(group, hosts=[host.hostname])
out["_meta"]["hostvars"][hostname] = host.get_vars()
if "all" not in groups:
groups.append("all")
for groupname, group in self.groups.items():
group.build()
out[groupname] = group.to_dict()
_add_host(inventory, hostname, groups, host_vars)
return out
def _handle_group(attrs, inventory):
group_vars = _extract_dict(attrs, "vars")
children = _extract_list(attrs, "children")
group_name = attrs["inventory_group_name"]
_add_group(inventory, group_name, children, group_vars)
class AnsibleHost(object):
'''
AnsibleHost represents a host for the Ansible inventory.
'''
def _walk_state(tfstate, inventory):
for module in tfstate["modules"]:
for resource in module["resources"].values():
if not resource["type"].startswith("ansible_"):
continue
def __init__(self, hostname, source=None):
self.sources = []
self.hostname = hostname
self.groups = set(["all"])
self.host_vars = {}
attrs = resource["primary"]["attributes"]
if source:
self.add_source(source)
if resource["type"] == "ansible_host":
_handle_host(attrs, inventory)
if resource["type"] == "ansible_group":
_handle_group(attrs, inventory)
def update(self, groups=None, host_vars=None):
'''Update host resource with additional groups and vars.'''
if host_vars:
self.host_vars.update(host_vars)
if groups:
self.groups.update(groups)
def add_source(self, source):
'''Add a Terraform resource to the sources list.'''
self.sources.append(source)
def build(self):
'''Assemble host details from registered sources.'''
self.sources.sort(key=lambda source: source.priority())
for source in self.sources:
if source.type() == "ansible_host":
groups = source.read_list_attr("groups")
host_vars = source.read_dict_attr("vars")
self.update(groups=groups, host_vars=host_vars)
elif source.type() == "ansible_host_var":
host_vars = {source.read_attr(
"key"): source.read_attr("value")}
self.update(host_vars=host_vars)
self.groups = sorted(self.groups)
def get_vars(self):
'''Get the host's variable dictionary.'''
return dict(self.host_vars)
class AnsibleGroup(object):
'''
AnsibleGroup represents a group for the Ansible inventory.
'''
def __init__(self, groupname, source=None):
self.groupname = groupname
self.sources = []
self.hosts = set()
self.children = set()
self.group_vars = {}
if source:
self.add_source(source)
def update(self, children=None, hosts=None, group_vars=None):
'''
Update group resource with additional children, hosts, or group variables.
'''
if hosts:
self.hosts.update(hosts)
if children:
self.children.update(children)
if group_vars:
self.group_vars.update(group_vars)
def add_source(self, source):
'''Add a Terraform resource to the sources list.'''
self.sources.append(source)
def build(self):
'''Assemble group details from registered sources.'''
self.sources.sort(key=lambda source: source.priority())
for source in self.sources:
if source.type() == "ansible_group":
children = source.read_list_attr("children")
group_vars = source.read_dict_attr("vars")
self.update(children=children, group_vars=group_vars)
elif source.type() == "ansible_group_var":
group_vars = {source.read_attr(
"key"): source.read_attr("value")}
self.update(group_vars=group_vars)
self.hosts = sorted(self.hosts)
self.children = sorted(self.children)
def to_dict(self):
'''Prepare structure for final Ansible inventory JSON.'''
return {
"children": list(self.children),
"hosts": list(self.hosts),
"vars": dict(self.group_vars)
}
def _execute_shell():
encoding = 'utf-8'
tf_workspace = [TERRAFORM_PATH, 'workspace', 'select', TERRAFORM_WS_NAME]
proc_ws = Popen(tf_workspace, cwd=TERRAFORM_DIR, stdout=PIPE,
stderr=PIPE, universal_newlines=True)
_, err_ws = proc_ws.communicate()
if err_ws != '':
sys.stderr.write(str(err_ws)+'\n')
sys.exit(1)
else:
tf_command = [TERRAFORM_PATH, 'state', 'pull']
proc_tf_cmd = Popen(tf_command, cwd=TERRAFORM_DIR,
stdout=PIPE, stderr=PIPE, universal_newlines=True)
out_cmd, err_cmd = proc_tf_cmd.communicate()
if err_cmd != '':
sys.stderr.write(str(err_cmd)+'\n')
sys.exit(1)
else:
return json.loads(out_cmd, encoding=encoding)
return inventory
def _backup_tf(tfstate):
# Creates a state backup in case we lose Consul
with open(TERRAFORM_BPK, 'w') as f:
f.write(json.dumps(tfstate))
f.write(json.dumps(tfstate.state_json))
def _backup_ansible(inventory):
# Creates a state backup in Ansible inventory format
text = '# NOTE: This file is generated by terraform.py\n'
text += '# For emergency use when Consul fails\n'
text += '[all]\n'
for host in inventory['_meta']['hostvars'].values():
for hostname, host in sorted(inventory.hosts.items()):
text += (
'{hostname} hostname={hostname} ansible_host={ansible_host} '+
'env={env} stage={stage} data_center={data_center} region={region} '+
'dns_entry={dns_entry}\n'
).format(**host)
'{0} hostname={0} ansible_host={1} '
).format(hostname, host.host_vars['ansible_host']) + (
'env={env} stage={stage} data_center={data_center} '+
'region={region} dns_entry={dns_entry}\n'
).format(**host.host_vars)
text += '\n'
for name, hosts in inventory.iteritems():
if name == '_meta':
for name, hosts in sorted(inventory.groups.items()):
if name in ['_meta', 'all']:
continue
text += '[{}]\n'.format(name)
for host in hosts['hosts']:
text += '{}\n'.format(host)
for hostname in sorted(hosts.hosts):
text += '{}\n'.format(hostname)
text += '\n'
with open(ANSIBLE_BKP, 'w') as f:
f.write(text)
def _main():
try:
tf_command = [TERRAFORM_PATH, 'state', 'pull', '-input=false']
proc = subprocess.Popen(tf_command, cwd=TERRAFORM_DIR, stdout=subprocess.PIPE)
tfstate = json.load(proc.stdout)
# format state for ansible
inventory = _walk_state(tfstate, _init_inventory())
# print out for ansible
sys.stdout.write(json.dumps(inventory, indent=2))
tfstate = TerraformState(_execute_shell())
inventory = AnsibleInventory()
for resource in tfstate.resources():
if resource.is_ansible():
inventory.add_resource(resource)
sys.stdout.write(json.dumps(inventory.to_dict(), indent=2))
# backup raw TF state
_backup_tf(tfstate)
# backup ansible inventory
_backup_ansible(inventory)
except Exception as ex:
print(ex)
except Exception:
traceback.print_exc(file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
_main()
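To make the rewrite above easier to follow, here is an editor's sketch (not part of the commit) of the two state layouts the script now has to handle. The resource, hostname, and address values are invented. Pre-0.12 state nests resources under "modules" and flattens map and list attributes into dotted keys, while 0.12 state exposes a top-level "resources" list with real nested values; the final check mirrors the one in TerraformState.__init__.

# Hypothetical pre-0.12 state fragment: note the flattened "groups.#",
# "groups.0", "vars.%" and "vars.*" attribute keys.
pre_012_state = {
    "modules": [{
        "resources": {
            "ansible_host.node": {
                "type": "ansible_host",
                "primary": {"attributes": {
                    "inventory_hostname": "node-01.example.test",
                    "groups.#": "1", "groups.0": "swarm",
                    "vars.%": "1", "vars.ansible_host": "10.0.0.1",
                }},
            },
        },
    }],
}

# Hypothetical 0.12+ state fragment: per-resource "instances" carry nested
# lists and maps, so no flattening is needed.
post_012_state = {
    "resources": [{
        "type": "ansible_host",
        "instances": [{"attributes": {
            "inventory_hostname": "node-01.example.test",
            "groups": ["swarm"],
            "vars": {"ansible_host": "10.0.0.1"},
        }}],
    }],
}

# Same detection TerraformState uses: only pre-0.12 state has "modules".
for name, state in (("pre-0.12", pre_012_state), ("0.12+", post_012_state)):
    print(name, "-> flat_attrs =", "modules" in state)

# Either way the script should end up emitting roughly the same inventory:
# {"_meta": {"hostvars": {"node-01.example.test": {"ansible_host": "10.0.0.1"}}},
#  "all":   {"children": [], "hosts": ["node-01.example.test"], "vars": {}},
#  "swarm": {"children": [], "hosts": ["node-01.example.test"], "vars": {}}}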

main.tf

@@ -1,31 +1,34 @@
/* PROVIDERS --------------------------------------*/
provider "digitalocean" {
token = "${var.digitalocean_token}"
version = "<= 0.1.3"
token = var.digitalocean_token
}
provider "cloudflare" {
email = "${var.cloudflare_email}"
token = "${var.cloudflare_token}"
org_id = "${var.cloudflare_org_id}"
email = var.cloudflare_email
token = var.cloudflare_token
org_id = var.cloudflare_org_id
}
provider "google" {
credentials = "${file("google-cloud.json")}"
credentials = file("google-cloud.json")
project = "russia-servers"
region = "us-central1"
}
provider "alicloud" {
access_key = "${var.alicloud_access_key}"
secret_key = "${var.alicloud_secret_key}"
region = "${var.alicloud_region}"
access_key = var.alicloud_access_key
secret_key = var.alicloud_secret_key
region = var.alicloud_region
}
/* BACKEND ----------------------------------------*/
terraform {
backend "consul" {
address = "https://consul.statusim.net:8400"
lock = true
address = "https://consul.statusim.net:8400"
lock = true
/* WARNING This needs to be changed for every repo. */
path = "terraform/swarm/"
ca_file = "ansible/files/consul-ca.crt"
@@ -37,30 +40,34 @@ terraform {
/* WORKSPACES -----------------------------------*/
locals {
ws = "${merge(local.env["defaults"], local.env[terraform.workspace])}"
ws = merge(local.env["defaults"], local.env[terraform.workspace])
}
/* RESOURCES --------------------------------------*/
module "swarm" {
source = "github.com/status-im/infra-tf-multi-provider"
source = "github.com/status-im/infra-tf-multi-provider"
/* node type */
name = "node"
group = "swarm"
name = "node"
group = "swarm"
/* scaling options */
count = "${local.ws["hosts_count"]}"
host_count = local.ws["hosts_count"]
do_size = "s-2vcpu-4gb"
gc_size = "n1-standard-2"
ac_size = "ecs.sn1ne.large"
gc_vol_size = 50
/* general */
env = "${var.env}"
domain = "${var.domain}"
env = var.env
domain = var.domain
/* firewall */
open_ports = [
"443-443", /* https */
"30303-30303", /* geth */
"30399-30399", /* swarm */
open_ports = [
"443", /* https */
"30303", /* geth */
"30399", /* swarm */
]
}
@@ -74,6 +81,7 @@ resource "cloudflare_load_balancer_monitor" "main" {
interval = 60
retries = 5
timeout = 7
/* disables SSL cert check, this way we can use origin */
allow_insecure = true
}
@@ -81,33 +89,33 @@ resource "cloudflare_load_balancer_monitor" "main" {
/* WARNING: Statically done until Terraform 0.12 arrives */
resource "cloudflare_load_balancer_pool" "main" {
name = "${terraform.workspace}-${var.env}"
monitor = "${cloudflare_load_balancer_monitor.main.id}"
monitor = cloudflare_load_balancer_monitor.main.id
notification_email = "jakub@status.im"
minimum_origins = 1
origins {
name = "${element(keys(module.swarm.hosts["do-eu-amsterdam3"]), 0)}"
address = "${element(values(module.swarm.hosts["do-eu-amsterdam3"]), 0)}"
name = element(keys(module.swarm.hosts_by_dc["do-eu-amsterdam3"]), 0)
address = element(values(module.swarm.hosts_by_dc["do-eu-amsterdam3"]), 0)
enabled = true
}
origins {
name = "${element(keys(module.swarm.hosts["gc-us-central1-a"]), 0)}"
address = "${element(values(module.swarm.hosts["gc-us-central1-a"]), 0)}"
name = element(keys(module.swarm.hosts_by_dc["gc-us-central1-a"]), 0)
address = element(values(module.swarm.hosts_by_dc["gc-us-central1-a"]), 0)
enabled = true
}
origins {
name = "${element(keys(module.swarm.hosts["ac-cn-hongkong-c"]), 0)}"
address = "${element(values(module.swarm.hosts["ac-cn-hongkong-c"]), 0)}"
name = element(keys(module.swarm.hosts_by_dc["ac-cn-hongkong-c"]), 0)
address = element(values(module.swarm.hosts_by_dc["ac-cn-hongkong-c"]), 0)
enabled = true
}
}
// This might work, not sure yet
resource "cloudflare_load_balancer" "main" {
zone = "status.im"
name = "${terraform.workspace}-${var.env}.status.im"
description = "Load balancing of Swarm fleet."
proxied = true
zone = "status.im"
name = "${terraform.workspace}-${var.env}.status.im"
description = "Load balancing of Swarm fleet."
proxied = true
fallback_pool_id = "${cloudflare_load_balancer_pool.main.id}"
default_pool_ids = ["${cloudflare_load_balancer_pool.main.id}"]
fallback_pool_id = cloudflare_load_balancer_pool.main.id
default_pool_ids = [cloudflare_load_balancer_pool.main.id]
}


@@ -1,55 +1,64 @@
/* CONFIG ----------------------------------------*/
variable ssh_keys {
variable "ssh_keys" {
description = "Names of ssh public keys to add to created hosts"
type = "list"
type = list(string)
# ssh key IDs acquired using doctl
default = ["16822693", "18813432", "18813461", "19525749", "20671731", "20686611"]
default = ["16822693", "18813432", "18813461", "19525749", "20671731", "20686611"]
}
variable env {
variable "env" {
description = "Environment for these hosts, affects DNS entries."
type = string
default = "swarm"
}
variable domain {
variable "domain" {
description = "DNS Domain to update"
type = string
default = "statusim.net"
}
variable ssh_user {
variable "ssh_user" {
description = "User used to log in to instance"
type = string
default = "root"
}
/* PROVIDERS ------------------------------------*/
variable cloudflare_token {
variable "cloudflare_token" {
description = "Token for interacting with Cloudflare API."
type = string
}
variable digitalocean_token {
variable "digitalocean_token" {
description = "Token for interacting with DigitalOcean API."
type = string
}
variable cloudflare_email {
variable "cloudflare_email" {
description = "Email address of Cloudflare account."
type = string
}
variable cloudflare_org_id {
variable "cloudflare_org_id" {
description = "ID of the CloudFlare organization."
type = string
}
variable alicloud_access_key {
variable "alicloud_access_key" {
description = "Alibaba Cloud API access key."
type = string
}
variable alicloud_secret_key {
variable "alicloud_secret_key" {
description = "Alibaba Cloud API secret key."
type = string
}
variable alicloud_region {
variable "alicloud_region" {
description = "Alibaba Cloud hosting region."
type = string
default = "cn-hongkong"
}

versions.tf (new file)

@@ -0,0 +1,3 @@
terraform {
required_version = ">= 0.12"
}


@@ -12,9 +12,9 @@ locals {
defaults = {
hosts_count = 1
}
# For testing infra changes before rollout to other fleets
test = {}
}
}
/*---------------------------------------------------------*/