add new windows-01.he-eu-hel1.nimbus.prater host

To replace `windows-01.gc-us-central1-a.nimbus.prater`, which is running
out of disk space and is also far too expensive.

Signed-off-by: Jakub Sokołowski <jakub@status.im>
Jakub Sokołowski 2022-02-25 14:19:08 +01:00
parent 489878038e
commit 9b365b02a7
12 changed files with 109 additions and 65 deletions

View File

@@ -31,6 +31,7 @@ store-03.he-eu-hel1.logs.nimbus hostname=store-03.he-eu-hel1.logs.nimbus ansible
testing-large-01.aws-eu-central-1a.nimbus.prater hostname=testing-large-01.aws-eu-central-1a.nimbus.prater ansible_host=3.65.99.236 env=nimbus stage=prater data_center=aws-eu-central-1a region=eu-central-1a dns_entry=testing-large-01.aws-eu-central-1a.nimbus.prater.statusim.net
unstable-large-01.aws-eu-central-1a.nimbus.prater hostname=unstable-large-01.aws-eu-central-1a.nimbus.prater ansible_host=3.65.85.130 env=nimbus stage=prater data_center=aws-eu-central-1a region=eu-central-1a dns_entry=unstable-large-01.aws-eu-central-1a.nimbus.prater.statusim.net
windows-01.gc-us-central1-a.nimbus.prater hostname=windows-01.gc-us-central1-a.nimbus.prater ansible_host=35.232.80.244 env=nimbus stage=prater data_center=gc-us-central1-a region=us-central1-a dns_entry=windows-01.gc-us-central1-a.nimbus.prater.statusim.net
windows-01.he-eu-hel1.nimbus.prater hostname=windows-01.he-eu-hel1.nimbus.prater ansible_host=65.21.233.67 env=nimbus stage=prater data_center=he-eu-hel1 region=eu-hel1 dns_entry=windows-01.he-eu-hel1.nimbus.prater.statusim.net
[aws-eu-central-1a]
goerli-01.aws-eu-central-1a.nimbus.geth
@@ -69,6 +70,7 @@ metal-07.he-eu-hel1.nimbus.prater
store-01.he-eu-hel1.logs.nimbus
store-02.he-eu-hel1.logs.nimbus
store-03.he-eu-hel1.logs.nimbus
windows-01.he-eu-hel1.nimbus.prater
[log-dash]
node-01.aws-eu-central-1a.dash.nimbus
@@ -128,6 +130,7 @@ unstable-large-01.aws-eu-central-1a.nimbus.prater
[nimbus-prater-windows]
windows-01.gc-us-central1-a.nimbus.prater
windows-01.he-eu-hel1.nimbus.prater
[nimbus-pyrmont-metal]
metal-01.he-eu-hel1.nimbus.pyrmont
@@ -166,6 +169,7 @@ stable-large-01.aws-eu-central-1a.nimbus.prater
testing-large-01.aws-eu-central-1a.nimbus.prater
unstable-large-01.aws-eu-central-1a.nimbus.prater
windows-01.gc-us-central1-a.nimbus.prater
windows-01.he-eu-hel1.nimbus.prater
[nimbus.pyrmont]
metal-01.he-eu-hel1.nimbus.pyrmont

View File

@@ -10,6 +10,8 @@ module "nimbus_eth1_node_hetzner" {
env = "nimbus"
stage = "eth1"
group = "nimbus-eth1-metal"
region = "eu-hel1"
prefix = "he"
domain = var.domain
ips = ["65.21.230.244"]

View File

@@ -5,6 +5,8 @@ module "nimbus_log_store" {
env = "logs"
stage = "nimbus"
group = "logs.nimbus"
region = "eu-hel1"
prefix = "he"
domain = var.domain
ips = [

View File

@@ -9,6 +9,8 @@ module "nimbus_nodes_mainnet_hetzner" {
env = "nimbus"
stage = "mainnet"
group = "nimbus-mainnet-metal"
region = "eu-hel1"
prefix = "he"
domain = var.domain
ips = [

View File

@@ -1,5 +1,7 @@
# Description
This isn't a real Hetzner cloud provider module for Terraform.
This is a dummy module which does not create any actual cloud resources. It's intended for registering hosts that cannot be managed with Terraform in our infra.
This is just a dummy module which creates the Ansible inventory hosts in order to make the Hetzner hosts appear the same way all the other hosts created by Terraform do.
For example, providers like Hetzner or MacStadium have no provider module for Terraform.
This module creates the Ansible inventory hosts so that these hosts appear the same way as all the other hosts created by Terraform.
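For reference, here is how this module ends up being invoked for the new Hetzner Windows host later in this commit; a minimal sketch with the values taken straight from that change:

    module "nimbus_nodes_prater_windows" {
      source = "./modules/dummy-module"
      name   = "windows"
      env    = "nimbus"
      stage  = "prater"
      group  = "nimbus-prater-windows"
      region = "eu-hel1"
      prefix = "he"
      domain = var.domain
      /* Windows-specific connection settings */
      become     = false
      shell_type = "powershell"
      ips        = ["65.21.233.67"] # windows-01.he-eu-hel1.nimbus.prater
    }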

View File

@@ -1,7 +1,6 @@
/*************************************************
* WARNING!
* This is not a Terraform provider for Hetzner.
* I'm just creating the inventory entries
* This is just creating the inventory entries
* the same way I do it for other hosts so
* Ansible can use them during provisioning.
*************************************************/
@@ -11,37 +10,41 @@
locals {
stage = var.stage != "" ? var.stage : terraform.workspace
tokens = split(".", local.stage)
dc = "${var.provider_name}-${var.region}"
# map of ip => hostname
dc = "${var.prefix}-${var.region}"
# map of hostname => ip
hostnames = { for i, ip in var.ips :
ip => "${var.name}-${format("%02d", i + 1)}.${local.dc}.${var.env}.${local.stage}"
"${var.name}-${format("%02d", i + 1)}.${local.dc}.${var.env}.${local.stage}" => ip
}
}
/* RESOURCES ------------------------------------*/
resource "ansible_host" "host" {
for_each = local.hostnames
inventory_hostname = each.value
inventory_hostname = each.key
groups = [var.group, local.dc, "${var.env}.${local.stage}"]
vars = {
ansible_host = each.key
hostname = each.value
region = var.region
dns_domain = var.domain
dns_entry = "${each.value}.${var.domain}"
data_center = local.dc
stage = local.stage
env = var.env
hostname = each.key
dns_entry = "${each.key}.${var.domain}"
dns_domain = var.domain
data_center = local.dc
region = var.region
env = var.env
stage = local.stage
ansible_host = each.value
/* Optional extra Ansible variables necessary for Windows */
ansible_shell_type = (var.shell_type == null ? null : var.shell_type)
ansible_become = (var.become == null ? null : var.become)
}
}
resource "cloudflare_record" "host" {
for_each = local.hostnames
zone_id = var.cf_zone_id
name = each.value // hostname
value = each.key // ip
type = "A"
ttl = 3600
zone_id = var.zone_id
name = each.key
value = each.value
type = "A"
}
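As a worked example of the new hostname => ip mapping: with the values used for the Hetzner Windows host added in this commit (name "windows", prefix "he", region "eu-hel1", env "nimbus", stage "prater", a single IP), the locals evaluate to roughly:

    dc = "he-eu-hel1"
    hostnames = {
      "windows-01.he-eu-hel1.nimbus.prater" = "65.21.233.67"
    }

The ansible_host resource then uses the map key as inventory_hostname and the value as ansible_host, while the cloudflare_record resource creates an A record for windows-01.he-eu-hel1.nimbus.prater in the statusim.net zone, matching the dns_entry in the inventory change at the top of this commit.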

View File

@@ -3,9 +3,9 @@ output "public_ips" {
}
output "hostnames" {
value = values(local.hostnames)
value = keys(local.hostnames)
}
output "hosts" {
value = zipmap(values(local.hostnames), var.ips)
value = local.hostnames
}
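With the hostnames map now keyed by hostname, the outputs for that same Windows module would evaluate to roughly:

    hostnames = ["windows-01.he-eu-hel1.nimbus.prater"]
    hosts     = {
      "windows-01.he-eu-hel1.nimbus.prater" = "65.21.233.67"
    }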

View File

@@ -1,34 +1,33 @@
/* SCALING --------------------------------------*/
variable "region" {
description = "Region in which the host reside."
type = string
}
variable "prefix" {
description = "Short name of provider being used."
type = string
default = "ms"
}
/* STATIC ---------------------------------------*/
variable "ips" {
description = "Static list of IPs used by the hosts."
type = list(string)
}
variable "region" {
description = "Region in which the host reside."
type = string
default = "eu-hel1"
}
variable "provider_name" {
description = "Short name of provider being used."
type = string
default = "he"
}
/* SECURITY --------------------------------------*/
variable "ssh_user" {
description = "Default user for SSH access."
type = string
default = "root"
}
/* CONFIG ----------------------------------------*/
/* GENERAL --------------------------------------*/
variable "name" {
description = "Name for hosts. To be used in the DNS entry."
description = "Prefix of hostname before index."
type = string
default = "node"
}
variable "group" {
description = "Name of Ansible group to add hosts to."
type = string
}
@@ -43,21 +42,25 @@ variable "stage" {
default = ""
}
variable "group" {
description = "Ansible group to assign hosts to."
type = string
}
variable "domain" {
description = "DNS Domain to update"
type = string
}
/* DNS ------------------------------------------*/
/* We default to: statusim.net */
variable "cf_zone_id" {
variable "zone_id" {
description = "ID of CloudFlare zone for host record."
type = string
/* We default to: statusim.net */
default = "14660d10344c9898521c4ba49789f563"
}
variable "shell_type" {
description = "Type of shell used by Ansible."
type = string
default = null
}
variable "become" {
description = "Whether Ansible should become a user."
type = string
default = null
}
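These two new optional variables feed the Windows-specific Ansible connection settings set in the module's resources shown earlier in this commit; for the host added here (shell_type "powershell", become false) the resulting host vars are roughly:

    ansible_shell_type = "powershell"
    ansible_become     = false

Existing Linux hosts simply leave both at their null defaults.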

View File

@@ -6,8 +6,8 @@ terraform {
version = " = 1.0.4"
}
cloudflare = {
source = "cloudflare/cloudflare"
version = " = 2.21.0"
source = "cloudflare/cloudflare"
version = " = 2.21.0"
}
}
}

View File

@@ -9,6 +9,8 @@ module "nimbus_openeth_mainnet" {
env = "nimbus"
stage = "openeth"
group = "nimbus-openeth-mainnet"
region = "eu-hel1"
prefix = "he"
domain = var.domain
ips = [

View File

@@ -115,6 +115,26 @@ module "nimbus_nodes_prater_unstable_windows" {
root_vol_type = "pd-ssd"
}
module "nimbus_nodes_prater_windows" {
source = "./modules/dummy-module"
name = "windows"
env = "nimbus"
stage = "prater"
group = "nimbus-prater-windows"
region = "eu-hel1"
prefix = "he"
domain = var.domain
/* Windows */
become = false
shell_type = "powershell"
ips = [
"65.21.233.67", # windows-01.he-eu-hel1.nimbus.prater
]
}
module "nimbus_nodes_prater_hetzner" {
source = "./modules/dummy-module"
@@ -122,6 +142,8 @@ module "nimbus_nodes_prater_hetzner" {
env = "nimbus"
stage = "prater"
group = "nimbus-prater-metal"
region = "eu-hel1"
prefix = "he"
domain = var.domain
ips = [
@@ -155,13 +177,13 @@ resource "cloudflare_record" "testing_prater_beacon_api" {
module "nimbus_nodes_prater_macos" {
source = "./modules/dummy-module"
name = "macos"
env = "nimbus"
stage = "prater"
group = "nimbus-prater-macos"
region = "eu-dublin"
provider_name = "ms"
domain = var.domain
name = "macos"
env = "nimbus"
stage = "prater"
group = "nimbus-prater-macos"
region = "eu-dublin"
prefix = "ms"
domain = var.domain
ips = ["207.254.102.98"]
}
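For comparison, the provider_name => prefix rename leaves the existing MacStadium host unchanged: with prefix "ms" and region "eu-dublin" the module derives the same data center and hostname as before, roughly:

    dc = "ms-eu-dublin"
    hostnames = {
      "macos-01.ms-eu-dublin.nimbus.prater" = "207.254.102.98"
    }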

View File

@@ -9,6 +9,8 @@ module "nimbus_nodes_pyrmont_hetzner" {
env = "nimbus"
stage = "pyrmont"
group = "nimbus-pyrmont-metal"
region = "eu-hel1"
prefix = "he"
domain = var.domain
ips = [