From 1ba18ae134131099da7487f441ca37411782fbd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Soko=C5=82owski?= Date: Mon, 3 Feb 2020 12:00:27 +0100 Subject: [PATCH] log-store: deploy an ElasticSearch cluster MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jakub Sokołowski --- ansible/group_vars/log-store.yml | 71 ++++++++++++++++++++++++++++++++ ansible/logs.yml | 16 +++++++ ansible/requirements.yml | 20 +++++++++ logs.tf | 36 ++++++++++++++++ variables.tf | 7 +++- 5 files changed, 149 insertions(+), 1 deletion(-) create mode 100644 ansible/group_vars/log-store.yml create mode 100644 ansible/logs.yml create mode 100644 logs.tf diff --git a/ansible/group_vars/log-store.yml b/ansible/group_vars/log-store.yml new file mode 100644 index 0000000..08b294c --- /dev/null +++ b/ansible/group_vars/log-store.yml @@ -0,0 +1,71 @@ +--- +# Mount extra volume for ElasticSearch container +bootstrap_extra_volume_path: '/docker' + +# ElasticSearch Cluster +es_master_nodes: | + {{ ansible_play_hosts + | map('extract', hostvars) + | list + | json_query( + '[].{ + name: hostname, + addr: ansible_local.tinc.vpn_ip, + port: to_string(es_node_port) + }') }} +es_cluster_name: 'nimbus-logs-search' +es_cont_name: 'elasticsearch' + +es_image: 'docker.elastic.co/elasticsearch/elasticsearch-oss:7.5.2' +es_api_port: 9200 +es_node_port: 9300 + +# Since Logstash stores 1 index per day this is fine +# See: https://www.elastic.co/blog/how-many-shards-should-i-have-in-my-elasticsearch-cluster +es_number_of_replicas: 1 +es_number_of_shards: 3 + +# JVM memory settings +es_jvm_mem_ratio: 0.6 +es_jvm_mem: '{{ (ansible_memtotal_mb * es_jvm_mem_ratio) | round(0, "floor") }}' +es_jvm_min_heap: '{{ es_jvm_mem | int }}m' +es_jvm_max_heap: '{{ es_jvm_mem | int }}m' +# ElasticSearch HQ +es_hq_lb_cont_name: '{{ es_cont_name }}' + +# oauth access +oauth_domain: 'nimbus-es.status.im' +oauth_cont_name: '{{ es_hq_cont_name }}-oauth' +oauth_upstream_cont: 
'{{ es_hq_cont_name }}' +oauth_upstream_port: '{{ es_hq_cont_port }}' +oauth_public_port: 443 +oauth_local_port: 8090 +oauth_cookie_secret: '{{ lookup("passwordstore", "services/cookie-secret") }}' +oauth_id: '{{ lookup("passwordstore", "services/elastic-hq-nimbus/oauth-id") }}' +oauth_secret: '{{ lookup("passwordstore", "services/elastic-hq-nimbus/oauth-secret") }}' + +# Proxy for ES HQ +nginx_sites: + elasticsearch_hq_http: + - listen 80 + - server_name {{ oauth_domain }} + - return 302 https://$server_name$request_uri + elasticsearch_hq_https: + - listen 80 + - listen 443 ssl + + - server_name {{ oauth_domain }} + + - ssl_certificate /certs/status.im/origin.crt + - ssl_certificate_key /certs/status.im/origin.key + + - location / { + proxy_set_header Host $host; + proxy_pass http://127.0.0.1:{{ oauth_local_port }}/; + } + +# Open Nginx Ports +open_ports_protocol: tcp +open_ports_comment: 'HTTP & HTTPS' +open_ports_list: [80, 443] diff --git a/ansible/logs.yml b/ansible/logs.yml new file mode 100644 index 0000000..52f26a7 --- /dev/null +++ b/ansible/logs.yml @@ -0,0 +1,16 @@ +--- +- name: Configure ElasticSearch servers + hosts: log-store + roles: + - role: origin-certs + tags: origin-certs + - role: elasticsearch + tags: elasticsearch + - role: elasticsearch-hq + tags: elasticsearch-hq + - role: oauth-proxy + tags: oauth-proxy + - role: nginx + tags: nginx + - role: open-ports + tags: open-ports diff --git a/ansible/requirements.yml b/ansible/requirements.yml index 4de4202..b95f2e9 100644 --- a/ansible/requirements.yml +++ b/ansible/requirements.yml @@ -1,7 +1,15 @@ +--- +- name: nginx + src: jdauphant.nginx + - name: origin-certs src: git@github.com:status-im/infra-role-origin-certs.git scm: git +- name: open-ports + src: git@github.com:status-im/infra-role-open-ports.git + scm: git + - name: infra-role-tinc src: git@github.com:status-im/infra-role-tinc.git scm: git @@ -10,6 +18,10 @@ src: git@github.com:status-im/infra-role-bootstrap.git scm: git +- name: 
oauth-proxy + src: git@github.com:status-im/infra-role-oauth-proxy.git + scm: git + - name: consul-service src: git@github.com:status-im/infra-role-consul-service.git scm: git @@ -21,3 +33,11 @@ - name: infra-role-eth2-testnet-site src: git@github.com:status-im/infra-role-eth2-testnet-site.git scm: git + +- name: elasticsearch + src: git@github.com:status-im/infra-role-elasticsearch.git + scm: git + +- name: elasticsearch-hq + src: git@github.com:status-im/infra-role-elasticsearch-hq.git + scm: git diff --git a/logs.tf b/logs.tf new file mode 100644 index 0000000..8fe4b4d --- /dev/null +++ b/logs.tf @@ -0,0 +1,36 @@ +module "nimbus_log_store" { + source = "github.com/status-im/infra-tf-amazon-web-services" + + name = "node" + env = "log-store" + group = "log-store" + stage = "nimbus" + domain = var.domain + + /* Scaling */ + host_count = var.log_stores_count + instance_type = "t3a.medium" /* 4GB RAM at least */ + data_vol_size = 500 /* We'll be storing TRACE logs */ + data_vol_type = "sc1" /* Change to gp2 for SSD */ + + /* Firewall */ + open_tcp_ports = [ + "80", /* HTTP */ + "443", /* HTTPS */ + ] + + /* Plumbing */ + vpc_id = module.nimbus_network.vpc_id + subnet_id = module.nimbus_network.subnet_id + secgroup_id = module.nimbus_network.secgroup_id + keypair_name = aws_key_pair.jakubgs.key_name +} + +resource "cloudflare_record" "nimbus_log_store" { + zone_id = local.zones["status.im"] + name = "nimbus-es" + value = module.nimbus_log_store.public_ips[count.index] + count = var.log_stores_count + type = "A" + proxied = true +} diff --git a/variables.tf b/variables.tf index 90c2d89..6a3af5d 100644 --- a/variables.tf +++ b/variables.tf @@ -40,6 +40,11 @@ variable "domain" { /* RESOURCES ------------------------------------*/ variable "hosts_count" { - description = "Count of hosts in nimbus cluster" + description = "Count of hosts in nimbus cluster." default = 9 } + +variable "log_stores_count" { + description = "Count of ElasticSearch cluster hosts." 
+ default = 3 +}