terraform and ansible configuration for boot hosts

Anton Iakimov 2023-09-08 15:48:13 +02:00
parent 383ebc09c4
commit 79077eba5e
No known key found for this signature in database
GPG Key ID: DEA1FE58DD8BF7FA
11 changed files with 261 additions and 25 deletions


@@ -2,8 +2,11 @@
# Root password
bootstrap__root_pass: '{{lookup("bitwarden", "root-pass")}}'
# Consul
bootstrap__consul_encryption_key: '{{lookup("bitwarden", "consul/cluster", field="encryption-key")}}'
bootstrap__consul_agent_acl_token: '{{lookup("bitwarden", "consul/acl-tokens", field="agent-default")}}'
bootstrap__consul_certs_ca_crt: '{{lookup("bitwarden", "consul/certs", file="ca.pem")}}'
bootstrap__consul_certs_client_crt: '{{lookup("bitwarden", "consul/certs", file="client.pem")}}'
bootstrap__consul_certs_client_key: '{{lookup("bitwarden", "consul/certs", file="client-key.pem")}}'
# SSHGuard
bootstrap__sshguard_whitelist_extra: ['{{lookup("bitwarden", "sshguard/whitelist", field="jakubgs-home")}}']
# Wireguard

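These secrets are resolved at playbook runtime by the bitwarden lookup plugin. A minimal sketch of checking one such lookup by hand, assuming the same plugin and field arguments as above (the play itself is illustrative, not part of this commit); piping through length confirms the secret resolved without printing it:

- name: Verify a Bitwarden lookup resolves
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Report length of the Consul encryption key
      ansible.builtin.debug:
        msg: '{{ lookup("bitwarden", "consul/cluster", field="encryption-key") | length }}'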

@@ -0,0 +1,77 @@
---
# Custom SSH accounts for the Shards fleet; UIDs should start from 8000.
# TODO: check the list and move to stage group_vars
bootstrap__active_extra_users:
- { name: tanguy, uid: 8002, admin: true, key: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC5wbFl7pJ+Vl6Csw7gh50+fYiuH/HAV+dLN0997isreWsrr+H/6uTDwvtYmbfG8Nrr1NVzFjrGXTUhF3lmSTzC7l+xdlUE9QoOumTF7OI7A79Wp0B3kzKk8YAKskyCtz4JUtvJaExJhxTy385dbXXrS/hV1lfciLiDp+rkg+EkCTedMeWVWhaJpoaS8OY/UzoYfPClFmGM5sAMF9UNPPIGjvCibTdt2uGerOki4FIcgqXARzOc1J6bEA1qTeYRh1wjv6KC3AyLRsLEooXqoviVYUm0bVLMZteTpIdY5N61FlytPcFpjAla9SCJYwPd3ud1hdurcQ5+wHuaAyKksCa6Qnhf/vX9LMFwbOkOqGLNKY5sdRhDyN5xbNdfk4jnY3E+8Z0CNmSV+dpmpwcOahNTB65t5zqcU/NXynFbALf3j3A9uklQ5Or1Y8ytnzjfSko+TQZHBr5/w810vxS3VNS470wGjyzhyVKSg1qNJXb+m2GLT9k5lBxnl7j3o8CLbOs= tanguy@wheatley' }
- { name: hanno, uid: 8003, admin: true, key: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDDlvmNGKxoddAmOuXvjm0II0M82aLjczb8F1ZTSV7zyvKaw6DNuaN6c6ZCdUvdjfF4hdEwMXNB37Cr5oysPo41rIuii+RVPd+c2WLZnC/MCg8d6b0/mREIfpuEMlz+u4lyr8/DST7zO4Ke95w5kVGtlh2kR88F9mlJlkyq2NCiqBU/blKObOjqS8OMRuMJ0GpwzF9+/dfXVahzdgHgKS0Q1ATvLHL0DvZSAGwHfHcIV5nF0ddRCofX4L2shIImZk5dAsATQTkT2gNWObhF6KuWbVyuhsLfRletzdnf8jUYm9Uatf7Woa9CxbPjdGxVxniZblmOumcaCNgaioKs0qeZzwNQmgJ/PXXw9uVWhDUYvNh7Cz+SNTVm4WG1tEk3WO9EU5dcCTgfEa94LUl5G+yXYX8H00spubpiWXv/0RjEQX4CZeu0pff209GLDWdpIaV8p9QVFBr8X8t8jw5zw5j/PxkprcT5P2A1t+WOUn9LrBzOJ1iAU3oJV3ZMYQhUS78= hanno@status.im' }
- { name: anthony, uid: 8004, groups: ['docker', 'dockremap'], key: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDYM50p0dQDL2uZFtAHrpdf5sBLTrTLMZGABZosO2GTMu67GnvYeBn/L42hehhVX6yNDU4KOhZVWjLLI4Ekdex3dKf7ynLCSC1jVkR1k7YgVvVAwXCEk38MMe1LqUJmb2K0rRzlIn0LNThWawahAhGNLNU55nJR0X3MhvL7G3UnqGY1ubTABQGFkRPIlC0smuVFyRPmpLF2pGMNTyCkFpqAP7qu+wW3PguxDZRUpgjsM/iZtaHh1a8z7ikPGWhkvJkx/U0KtYuhoMbBPR/3Ta6ttDNXs8z/cOvc5cB6fBa9Y1IapfXCC8r88WsIkaN9F+7fTLdhN7+RJvHgUrxNzhLOPzvGd4KmHvE22goCrO9yZbxzxlRhZf6zgu27waU2CQhCDYXTASUcWzJgfSocxPCYPHYFeDFj9Vj8NqyAFSqEnXCFcz/nuzBAgKyEmwcbg+6uz6UfC7Fygz5f8xKerggP2tuwasEwHZkFsicc/NQZknWFkjvwT9sOqHrAmFlRYXbR8CzcsLHDMN/t5j333Akso0AuOVA2n8LvhEUmfrOI2Z9KM9Vd7uZdDXYtdYpRgUgFJCfw7KH3/cskJ8s9HH8AxbQ2KkjNB+Dv4ZWGthAkiLRbu/uxunHVjorkYZeJ76JuY5AxOd1OYvl2LTA/lxs4vEd2FVvw4AuBH+RImofd4w== anthony@status.im' }
- { name: daniel, uid: 8005, groups: ['docker', 'dockremap'], key: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOPJlQ9iYVD2SNuW2ob2nvNHs+/O9PbcPc+dIepHNHO0 danielkaiser@status.im' }
- { name: ivan, uid: 8006, groups: ['docker', 'dockremap'], key: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJBdm8y1PfWjT1pioaWJSZ2ETrUySb+dS/ifDg+VIpLY ivansete@status.im' }
# Tag dependent on fleet: test
# TODO: switch to shards tags?
#nim_waku_cont_tag: 'deploy-shards-{{ stage }}'
nim_waku_cont_tag: 'deploy-wakuv2-shards'
nim_waku_cont_name: 'nim-waku'
# TODO: switch to trace logging for 1 node in host_vars
nim_waku_log_level: 'debug'
nim_waku_protocols_enabled: ['relay', 'filter', 'lightpush', 'peer-exchange']
nim_waku_disc_v5_enabled: true
nim_waku_dns4_domain_name: '{{ dns_entry }}'
#nim_waku_node_key: '{{lookup("bitwarden", "fleets/shards/"+stage+"/nodekeys", field=hostname)}}'
# Topic configuration
nim_waku_pubsub_topics:
- '/waku/2/rs/16/128'
- '/waku/2/rs/16/256'
nim_waku_protected_topics:
- '/waku/2/rs/16/128:045ced3b90fabf7673c5165f9cc3a038fd2cfeb96946538089c310b5eaa3a611094b27d8216d9ec8110bd0e0e9fa7a7b5a66e86a27954c9d88ebd41d0ab6cfbb91'
- '/waku/2/rs/16/256:049022b33f7583f34463f5b7622e5da29f99f993e6858a478465c68ee114ccf142204eff285ed922349c4b71b178a2e1a2154b99bcc2d8e91b3994626ffa9f1a6c'
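# Each entry above pairs a pubsub topic with a secp256k1 public key, i.e.
# '<topic>:<public-key>'; relayed messages on a protected topic are only
# accepted when signed by the matching private key.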
# Ports
nim_waku_p2p_tcp_port: 30303
nim_waku_p2p_udp_port: 30303
nim_waku_metrics_port: 8008
nim_waku_disc_v5_port: 9000
#nim_waku_websock_port: 8000
nim_waku_rpc_tcp_port: 8545
# Limits
nim_waku_p2p_max_connections: 300
# Store
nim_waku_store_message_retention_policy: 'time:2592000' # 30 days
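# time:2592000 is a retention window in seconds: 30 days * 24 h * 3600 s = 2592000.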
# DNS Discovery
# TODO: enable DNS discovery
nim_waku_dns_disc_enabled: false
#nim_waku_dns_disc_url: 'enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@{{ stage }}.nodes.status.im'
# Enable WebSockets via Websockify
nim_waku_websockify_enabled: false
# Enable websockets in Waku
nim_waku_websocket_enabled: false
# Consul Service
nim_waku_consul_success_before_passing: 5
nim_waku_consul_failures_before_warning: 2
nim_waku_consul_failures_before_critical: 20
# Peer connecting
waku_peers_rpc_port: '{{ nim_waku_rpc_tcp_port }}'
waku_peers_rpc_timeout: 20
waku_peers_rpc_retries: 5
waku_peers_consul_services:
- { name: '{{ nim_waku_cont_name }}', env: '{{ env }}', stage: '{{ stage }}' }
# Open LibP2P Ports
open_ports_default_comment: '{{ nim_waku_cont_name }}'
open_ports_default_chain: 'SERVICES'
open_ports_default_protocol: 'tcp'
open_ports_list:
- { port: '{{ nim_waku_p2p_udp_port }}', protocol: 'udp' }
- { port: '{{ nim_waku_p2p_tcp_port }}' }
- { port: '{{ nim_waku_disc_v5_port }}', protocol: 'udp' }
- { port: '{{ nim_waku_metrics_port }}', chain: 'VPN', ipset: 'metrics.hq' }
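# Each entry inherits the open_ports_default_* values above unless it
# overrides them, e.g. the metrics port swaps the SERVICES chain for VPN
# so it is reachable only from the 'metrics.hq' ipset (behavior inferred
# from the role's variable naming).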

ansible/inventory/test Normal file

@@ -0,0 +1,38 @@
# NOTE: This file is generated by terraform.py
# For emergency use when Consul fails
[all]
boot-01.ac-cn-hongkong-c.shards.test hostname=boot-01.ac-cn-hongkong-c.shards.test ansible_host=8.218.23.76 env=shards stage=test data_center=ac-cn-hongkong-c region=cn-hongkong-c dns_entry=boot-01.ac-cn-hongkong-c.shards.test.statusim.net
boot-01.do-ams3.shards.test hostname=boot-01.do-ams3.shards.test ansible_host=167.99.19.47 env=shards stage=test data_center=do-ams3 region=ams3 dns_entry=boot-01.do-ams3.shards.test.statusim.net
boot-01.gc-us-central1-a.shards.test hostname=boot-01.gc-us-central1-a.shards.test ansible_host=34.135.13.87 env=shards stage=test data_center=gc-us-central1-a region=us-central1-a dns_entry=boot-01.gc-us-central1-a.shards.test.statusim.net
boot-02.ac-cn-hongkong-c.shards.test hostname=boot-02.ac-cn-hongkong-c.shards.test ansible_host=8.218.174.108 env=shards stage=test data_center=ac-cn-hongkong-c region=cn-hongkong-c dns_entry=boot-02.ac-cn-hongkong-c.shards.test.statusim.net
boot-02.do-ams3.shards.test hostname=boot-02.do-ams3.shards.test ansible_host=178.128.143.241 env=shards stage=test data_center=do-ams3 region=ams3 dns_entry=boot-02.do-ams3.shards.test.statusim.net
boot-02.gc-us-central1-a.shards.test hostname=boot-02.gc-us-central1-a.shards.test ansible_host=34.31.14.239 env=shards stage=test data_center=gc-us-central1-a region=us-central1-a dns_entry=boot-02.gc-us-central1-a.shards.test.statusim.net
[ac-cn-hongkong-c]
boot-01.ac-cn-hongkong-c.shards.test
boot-02.ac-cn-hongkong-c.shards.test
[boot]
boot-01.ac-cn-hongkong-c.shards.test
boot-01.do-ams3.shards.test
boot-01.gc-us-central1-a.shards.test
boot-02.ac-cn-hongkong-c.shards.test
boot-02.do-ams3.shards.test
boot-02.gc-us-central1-a.shards.test
[do-ams3]
boot-01.do-ams3.shards.test
boot-02.do-ams3.shards.test
[gc-us-central1-a]
boot-01.gc-us-central1-a.shards.test
boot-02.gc-us-central1-a.shards.test
[shards.test]
boot-01.ac-cn-hongkong-c.shards.test
boot-01.do-ams3.shards.test
boot-01.gc-us-central1-a.shards.test
boot-02.ac-cn-hongkong-c.shards.test
boot-02.do-ams3.shards.test
boot-02.gc-us-central1-a.shards.test

ansible/main.yml Normal file

@@ -0,0 +1,21 @@
---
- name: Verify Ansible versions
hosts: all
tags: always
become: false
run_once: true
gather_facts: false
tasks:
- local_action: command ./versioncheck.py
changed_when: false
- name: Configure Waku Nodes
hosts: boot
roles:
- { role: open-ports, tags: open-ports }
- { role: swap-file, tags: swap-file }
- { role: nim-waku, tags: nim-waku }
# TODO: clean up if not needed, also remove from requirements
# - { role: conn-limit, tags: conn-limit }
# - { role: certbot, tags: certbot }
# - { role: waku-peers, tags: waku-peers }

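Together with the static inventory above, this playbook can be applied even while Consul-based dynamic inventory is down; a typical invocation, with the tag and inventory path shown only as an example:

ansible-playbook -i ansible/inventory/test ansible/main.yml -t nim-waku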

@@ -1,30 +1,42 @@
---
- name: infra-role-bootstrap-linux
src: git@github.com:status-im/infra-role-bootstrap-linux.git
version: 50eda0808cceaaad2a5c5cdb4493935f2e3a637d
scm: git
- name: infra-role-wireguard
src: git@github.com:status-im/infra-role-wireguard.git
version: b711bbabd2dc3d9ce8b1c3a6e5bc785901db9d09
scm: git
- name: open-ports
src: git@github.com:status-im/infra-role-open-ports.git
version: 24dc30dbdf85e6758cb6924074b2f7a0f4541524
scm: git
- name: swap-file
src: git@github.com:status-im/infra-role-swap-file.git
version: 3fb0fb8d313ab388df1b38d516e2ff88b72a2cf7
scm: git
- name: consul-service
src: git@github.com:status-im/infra-role-consul-service.git
version: 2b3d4e53856d6cc91ae5c5a342fd12f2bb96aa88
version: 4d7c9c606f5e039a22c34ba93961d05056c0e002
scm: git
- name: systemd-timer
src: git@github.com:status-im/infra-role-systemd-timer.git
version: c6bbc3d1b4b0ba603d82fa06cd17297d12523182
version: 24b9f3c82b0f2cc89211e40b0abce07e983e67c1
scm: git
- name: waku-peers
src: git@github.com:status-im/infra-role-waku-peers.git
version: 6dca8003ac7bb3611f158beba9a5039d92a6992f
scm: git
- name: certbot
src: git@github.com:status-im/infra-role-certbot.git
version: aa7daf1fe135375bb4295742e3c1566ee4aeffba
scm: git
- name: nim-waku
src: git@github.com:status-im/infra-role-nim-waku.git
# TODO: update hash and find a working image to support topic argument change
version: 8044c33ffb92b3ee73cba677a090330ff638b70c
scm: git
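The roles are pinned to exact commits, so they are installed (and re-synced after a pin bump) with ansible-galaxy before running the playbook:

ansible-galaxy install -f -r ansible/requirements.yml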

hosts_boot.tf Normal file

@@ -0,0 +1,34 @@
module "boot" {
source = "github.com/status-im/infra-tf-multi-provider"
/* node type */
name = "boot"
group = "boot"
env = "shards"
stage = terraform.workspace
domain = var.domain
/* scaling */
host_count = local.ws["boot_hosts_count"]
/* instance sizes */
do_type = local.ws["do_type"] /* DigitalOcean */
ac_type = local.ws["ac_type"] /* Alibaba Cloud */
gc_type = local.ws["gc_type"] /* Google Cloud */
/* data volumes */
ac_data_vol_size = local.ws["data_vol_size"]
do_data_vol_size = local.ws["data_vol_size"]
gc_data_vol_size = local.ws["data_vol_size"]
/* firewall */
open_tcp_ports = [
"80", /* certbot */
"443", /* p2p websockify */
"8000", /* p2p websocket */
]
open_udp_ports = [
"9000", /* discovery v5 */
]
}

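The local.ws references resolve per Terraform workspace. A hedged sketch of how such a map is commonly derived from the locals in workspaces.tf (this merge is an assumption; the actual definition is not part of this diff):

locals {
  /* Workspace-specific settings merged over the defaults map. */
  ws = merge(local.env["defaults"], lookup(local.env, terraform.workspace, {}))
}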
main.tf

@@ -8,24 +8,9 @@ terraform {
/* KV store has a limit of 512KB */
gzip = true
/* WARNING This needs to be changed for every repo. */
path = "terraform/codex/"
path = "terraform/shards/"
ca_file = "ansible/files/consul-ca.crt"
cert_file = "ansible/files/consul-client.crt"
key_file = "ansible/files/consul-client.key"
}
}
/* CF Zones ------------------------------------*/
/* CloudFlare Zone IDs required for records */
data "cloudflare_zones" "active" {
filter { status = "active" }
}
/* For easier access to zone ID by domain name */
locals {
zones = {
for zone in data.cloudflare_zones.active.zones :
zone.name => zone.id
}
}


@@ -4,5 +4,23 @@ provider "cloudflare" {
account_id = data.pass_password.cloudflare_account.password
}
provider "digitalocean" {
token = data.pass_password.digitalocean_token.password
spaces_access_id = data.pass_password.digitalocean_spaces_id.password
spaces_secret_key = data.pass_password.digitalocean_spaces_key.password
}
provider "google" {
credentials = data.pass_password.google_cloud_cred_json.full
project = "russia-servers"
region = "us-central1"
}
provider "alicloud" {
access_key = data.pass_password.alicloud_access_key.password
secret_key = data.pass_password.alicloud_secret_key.password
region = "cn-hongkong"
}
# Uses PASSWORD_STORE_DIR environment variable
provider "pass" {}


@@ -12,3 +12,34 @@ data "pass_password" "cloudflare_email" {
data "pass_password" "cloudflare_account" {
path = "cloud/Cloudflare/account"
}
/* Token for interacting with DigitalOcean API. */
data "pass_password" "digitalocean_token" {
path = "cloud/DigitalOcean/token"
}
/* Access key for Digital Ocean Spaces API. */
data "pass_password" "digitalocean_spaces_id" {
path = "cloud/DigitalOcean/spaces-id"
}
/* Secret key for Digital Ocean Spaces API. */
data "pass_password" "digitalocean_spaces_key" {
path = "cloud/DigitalOcean/spaces-key"
}
/* Alibaba Cloud API access key. */
data "pass_password" "alicloud_access_key" {
path = "cloud/Alibaba/access-key"
}
/* Alibaba Cloud API secret key. */
data "pass_password" "alicloud_secret_key" {
path = "cloud/Alibaba/secret-key"
}
/* Google Cloud API auth JSON */
data "pass_password" "google_cloud_cred_json" {
path = "cloud/GoogleCloud/json"
}
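Note how these are consumed in providers.tf: .password yields only the first line of a pass entry, while .full returns the whole file, which is why the multi-line Google credentials JSON uses .full.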


@@ -1,5 +1,5 @@
terraform {
required_version = "~> 1.2.0"
required_version = "> 1.4.0"
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
@@ -9,5 +9,14 @@ terraform {
source = "camptocamp/pass"
version = " = 2.0.0"
}
digitalocean = {
source = "digitalocean/digitalocean"
}
alicloud = {
source = "aliyun/alicloud"
}
google = {
source = "hashicorp/google"
}
}
}
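After extending required_providers like this, the working directory needs to be re-initialized so the new plugins are installed:

terraform init -upgrade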


@@ -10,6 +10,14 @@ locals {
env = {
defaults = {
/* Default settings for all fleets/workspaces. */
boot_hosts_count = 2
do_type = "s-1vcpu-2gb" /* DigitalOcean */
ac_type = "ecs.t5-lc1m2.small" /* Alibaba Cloud */
gc_type = "g1-small" /* Google Cloud */
data_vol_size = 40
}
test = {