destroy the Nimbus ElasticSearch cluster and dashboard
Signed-off-by: Jakub Sokołowski <jakub@status.im>
parent b3773f51b4
commit 118b780312
@@ -1,60 +0,0 @@
---
# Kibana Dashboard
kibana_domain: 'nimbus-logs.status.im'
kibana_version: '7.5.2'
kibana_cont_name: kibana
kibana_cont_port: 5601

# Kibana LogTrail Plugin
logtrail_kibana_version: '{{ kibana_version }}'
logtrail_kibana_plugins_path: '{{ kibana_cont_vol }}/plugins'

# oauth access
oauth_domain: '{{ kibana_domain }}'
oauth_cont_name: '{{ kibana_cont_name }}-oauth'
oauth_upstream_cont: '{{ kibana_cont_name }}'
oauth_upstream_port: '{{ kibana_cont_port }}'
oauth_public_port: 443
oauth_local_port: 8090
oauth_cookie_secret: '{{ lookup("passwordstore", "services/cookie-secret") }}'
oauth_id: '{{ lookup("passwordstore", "services/nimbus-kibana/oauth-id") }}'
oauth_secret: '{{ lookup("passwordstore", "services/nimbus-kibana/oauth-secret") }}'
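# Note: the passwordstore lookups above resolve secrets through the standard
# pass utility at deploy time, e.g. "pass services/nimbus-kibana/oauth-id"
# (assuming the operator's password store contains these entries).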

# ElasticSearch Load Balancer
es_lb_image: 'docker.elastic.co/elasticsearch/elasticsearch-oss:7.5.2'
es_lb_cont_name: elasticsearch-lb
es_lb_data_center: do-ams3
es_lb_cluster_name: 'nimbus-logs-search'
es_lb_api_port: 9200

logclean_es_host: 'localhost'
logclean_es_port: '{{ es_lb_api_port }}'

# JVM Memory settings
es_lb_jvm_min_heap: 2g
es_lb_jvm_max_heap: 2g

# Nginx Proxy config
nginx_sites:
  kibana_http:
    - listen 80
    - server_name {{ oauth_domain }}
    - return 302 https://$server_name$request_uri
  kibana_ssl:
    - listen 443 ssl
    - server_name {{ oauth_domain }}
    - ssl_certificate /certs/origin.crt
    - ssl_certificate_key /certs/origin.key
    - |
      location / {
        proxy_set_header Host $host;
        proxy_pass http://127.0.0.1:{{ oauth_local_port }}/;
      }

# Open Nginx Ports
open_ports_comment: 'HTTP & HTTPS'
open_ports_list:
  - { port: 80, protocol: 'tcp' }
  - { port: 443, protocol: 'tcp' }
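(For context: with the variables defined in this file substituted, the kibana_ssl site above should render to roughly the following nginx server block; this is a sketch of the expected output, not the literal template result. Port 80 traffic is redirected to it by kibana_http, and the oauth proxy on 8090 forwards authenticated requests on to the Kibana container on 5601.)

    server {
        listen 443 ssl;
        server_name nimbus-logs.status.im;
        ssl_certificate /certs/origin.crt;
        ssl_certificate_key /certs/origin.key;
        location / {
            proxy_set_header Host $host;
            proxy_pass http://127.0.0.1:8090/;
        }
    }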
@@ -1,72 +0,0 @@
---
# Mount extra volume for ElasticSearch container
bootstrap__extra_volume_path: '/docker'

# ElasticSearch Cluster
es_master_nodes: |
  {{ ansible_play_hosts
     | map('extract', hostvars)
     | list
     | json_query(
         '[].{
           name: hostname,
           addr: ansible_local.tinc.vpn_ip,
           port: to_string(es_node_port)
         }') }}
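# Rendered, this yields one {name, addr, port} object per play host, e.g.:
# [{"name": "node-01.aws-eu-central-1a.log-store.nimbus", "addr": "<tinc VPN IP>", "port": "9300"}]
# (values illustrative; real entries come from hostvars at runtime)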
es_cluster_name: 'nimbus-logs-search'
es_cont_name: 'elasticsearch'

es_image: 'docker.elastic.co/elasticsearch/elasticsearch-oss:7.5.2'
es_api_port: 9200
es_node_port: 9300

# Since Logstash stores 1 index per day this is fine.
# See: https://www.elastic.co/blog/how-many-shards-should-i-have-in-my-elasticsearch-cluster
es_number_of_replicas: 1
es_number_of_shards: 3

# JVM memory settings
es_jvm_mem_ratio: 0.6
es_jvm_mem: '{{ (ansible_memtotal_mb * es_jvm_mem_ratio) | round(0, "floor") }}'
es_jvm_min_heap: '{{ es_jvm_mem | int }}m'
es_jvm_max_heap: '{{ es_jvm_mem | int }}m'
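# Worked example (illustrative): on a host reporting 4096 MB of RAM,
# 4096 * 0.6 = 2457.6, floored to 2457, so both heaps become '2457m'.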

# ElasticSearch HQ
es_hq_lb_cont_name: '{{ es_cont_name }}'

# oauth access
oauth_domain: 'nimbus-es.status.im'
oauth_cont_name: '{{ es_hq_cont_name }}-oauth'
oauth_upstream_cont: '{{ es_hq_cont_name }}'
oauth_upstream_port: '{{ es_hq_cont_port }}'
oauth_public_port: 443
oauth_local_port: 8090
oauth_cookie_secret: '{{ lookup("passwordstore", "services/cookie-secret") }}'
oauth_id: '{{ lookup("passwordstore", "services/elastic-hq-nimbus/oauth-id") }}'
oauth_secret: '{{ lookup("passwordstore", "services/elastic-hq-nimbus/oauth-secret") }}'

# Proxy for ES HQ
nginx_sites:
  elasticsearch_hq_http:
    - listen 80
    - server_name {{ oauth_domain }}
    - return 302 https://$server_name$request_uri
  elasticsearch_hq_https:
    - listen 443 ssl
    - server_name {{ oauth_domain }}
    - ssl_certificate /certs/status.im/origin.crt
    - ssl_certificate_key /certs/status.im/origin.key
    - |
      location / {
        proxy_set_header Host $host;
        proxy_pass http://127.0.0.1:{{ oauth_local_port }}/;
      }

# Open Nginx Ports
open_ports_comment: 'HTTP & HTTPS'
open_ports_list:
  - { port: 80, protocol: 'tcp' }
  - { port: 443, protocol: 'tcp' }
@@ -2,12 +2,8 @@
# For emergency use when Consul fails
[all]
master-01.aws-eu-central-1a.nimbus.test hostname=master-01.aws-eu-central-1a.nimbus.test ansible_host=18.185.109.239 env=nimbus stage=test data_center=aws-eu-central-1a region=eu-central-1a dns_entry=master-01.aws-eu-central-1a.nimbus.test.statusim.net
node-01.aws-eu-central-1a.dash.nimbus hostname=node-01.aws-eu-central-1a.dash.nimbus ansible_host=3.121.189.153 env=dash stage=nimbus data_center=aws-eu-central-1a region=eu-central-1a dns_entry=node-01.aws-eu-central-1a.dash.nimbus.statusim.net
node-01.aws-eu-central-1a.log-store.nimbus hostname=node-01.aws-eu-central-1a.log-store.nimbus ansible_host=3.125.8.32 env=log-store stage=nimbus data_center=aws-eu-central-1a region=eu-central-1a dns_entry=node-01.aws-eu-central-1a.log-store.nimbus.statusim.net
node-01.aws-eu-central-1a.nimbus.test hostname=node-01.aws-eu-central-1a.nimbus.test ansible_host=3.126.82.94 env=nimbus stage=test data_center=aws-eu-central-1a region=eu-central-1a dns_entry=node-01.aws-eu-central-1a.nimbus.test.statusim.net
node-02.aws-eu-central-1a.log-store.nimbus hostname=node-02.aws-eu-central-1a.log-store.nimbus ansible_host=18.195.148.202 env=log-store stage=nimbus data_center=aws-eu-central-1a region=eu-central-1a dns_entry=node-02.aws-eu-central-1a.log-store.nimbus.statusim.net
node-02.aws-eu-central-1a.nimbus.test hostname=node-02.aws-eu-central-1a.nimbus.test ansible_host=54.93.83.30 env=nimbus stage=test data_center=aws-eu-central-1a region=eu-central-1a dns_entry=node-02.aws-eu-central-1a.nimbus.test.statusim.net
node-03.aws-eu-central-1a.log-store.nimbus hostname=node-03.aws-eu-central-1a.log-store.nimbus ansible_host=3.125.49.40 env=log-store stage=nimbus data_center=aws-eu-central-1a region=eu-central-1a dns_entry=node-03.aws-eu-central-1a.log-store.nimbus.statusim.net
node-03.aws-eu-central-1a.nimbus.test hostname=node-03.aws-eu-central-1a.nimbus.test ansible_host=3.123.41.197 env=nimbus stage=test data_center=aws-eu-central-1a region=eu-central-1a dns_entry=node-03.aws-eu-central-1a.nimbus.test.statusim.net
node-04.aws-eu-central-1a.nimbus.test hostname=node-04.aws-eu-central-1a.nimbus.test ansible_host=52.59.244.94 env=nimbus stage=test data_center=aws-eu-central-1a region=eu-central-1a dns_entry=node-04.aws-eu-central-1a.nimbus.test.statusim.net
node-05.aws-eu-central-1a.nimbus.test hostname=node-05.aws-eu-central-1a.nimbus.test ansible_host=54.93.37.91 env=nimbus stage=test data_center=aws-eu-central-1a region=eu-central-1a dns_entry=node-05.aws-eu-central-1a.nimbus.test.statusim.net
@@ -18,12 +14,8 @@ node-09.aws-eu-central-1a.nimbus.test hostname=node-09.aws-eu-central-1a.nimbus.

[aws-eu-central-1a]
master-01.aws-eu-central-1a.nimbus.test
node-01.aws-eu-central-1a.dash.nimbus
node-01.aws-eu-central-1a.log-store.nimbus
node-01.aws-eu-central-1a.nimbus.test
node-02.aws-eu-central-1a.log-store.nimbus
node-02.aws-eu-central-1a.nimbus.test
node-03.aws-eu-central-1a.log-store.nimbus
node-03.aws-eu-central-1a.nimbus.test
node-04.aws-eu-central-1a.nimbus.test
node-05.aws-eu-central-1a.nimbus.test

@@ -32,22 +24,6 @@ node-07.aws-eu-central-1a.nimbus.test
node-08.aws-eu-central-1a.nimbus.test
node-09.aws-eu-central-1a.nimbus.test

[dash.nimbus]
node-01.aws-eu-central-1a.dash.nimbus

[log-dash]
node-01.aws-eu-central-1a.dash.nimbus

[log-store]
node-01.aws-eu-central-1a.log-store.nimbus
node-02.aws-eu-central-1a.log-store.nimbus
node-03.aws-eu-central-1a.log-store.nimbus

[log-store.nimbus]
node-01.aws-eu-central-1a.log-store.nimbus
node-02.aws-eu-central-1a.log-store.nimbus
node-03.aws-eu-central-1a.log-store.nimbus

[nimbus-master]
master-01.aws-eu-central-1a.nimbus.test
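(With Consul unavailable, plays can still target these groups through this static inventory, e.g. "ansible log-store -i ansible/inventory -m ping"; the inventory path here is illustrative.)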
@@ -34,26 +34,6 @@
  src: git@github.com:status-im/infra-role-eth2-testnet-site.git
  scm: git

- name: kibana
  src: git@github.com:status-im/infra-role-kibana.git
  scm: git

- name: kibana-logtrail
  src: git@github.com:status-im/infra-role-kibana-logtrail.git
  scm: git

- name: elasticsearch
  src: git@github.com:status-im/infra-role-elasticsearch.git
  scm: git

- name: elasticsearch-hq
  src: git@github.com:status-im/infra-role-elasticsearch-hq.git
  scm: git

- name: elasticsearch-lb
  src: git@github.com:status-im/infra-role-elasticsearch-lb.git
  scm: git

- name: systemd-timer
  src: git@github.com:status-im/infra-role-systemd-timer.git
  scm: git
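(These entries use the standard ansible-galaxy requirements format; assuming this is the repo's requirements.yml, the roles were installed with "ansible-galaxy install -r requirements.yml" before this commit removed the ES-related ones.)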
dash.tf
@@ -1,33 +0,0 @@
module "nimbus_dashboard" {
  source = "github.com/status-im/infra-tf-amazon-web-services"

  name   = "node"
  env    = "dash"
  group  = "log-dash"
  stage  = "nimbus"
  domain = var.domain

  /* Scaling */
  host_count    = 1
  instance_type = "t3a.medium" /* 4GB RAM at least */

  /* Firewall */
  open_tcp_ports = [
    "80",  /* HTTP */
    "443", /* HTTPS */
  ]

  /* Plumbing */
  vpc_id       = module.nimbus_network.vpc.id
  subnet_id    = module.nimbus_network.subnets[0].id
  secgroup_id  = module.nimbus_network.secgroup.id
  keypair_name = aws_key_pair.jakubgs.key_name
}

resource "cloudflare_record" "nimbus_dashboard" {
  zone_id = local.zones["status.im"]
  name    = "nimbus-logs"
  value   = module.nimbus_dashboard.public_ips[0]
  type    = "A"
  proxied = true
}
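(This A record is what kibana_domain 'nimbus-logs.status.im' in the Kibana group_vars above resolved to, so removing this file tears down the dashboard's DNS entry along with its host.)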
logs.tf
@@ -1,36 +0,0 @@
module "nimbus_log_store" {
  source = "github.com/status-im/infra-tf-amazon-web-services"

  name   = "node"
  env    = "log-store"
  group  = "log-store"
  stage  = "nimbus"
  domain = var.domain

  /* Scaling */
  host_count    = var.log_stores_count
  instance_type = "t3a.medium" /* 4GB RAM at least */
  data_vol_size = 500          /* We'll be storing TRACE logs */
  data_vol_type = "st1"        /* Change to gp2 for SSD */

  /* Firewall */
  open_tcp_ports = [
    "80",  /* HTTP */
    "443", /* HTTPS */
  ]

  /* Plumbing */
  vpc_id       = module.nimbus_network.vpc.id
  subnet_id    = module.nimbus_network.subnets[0].id
  secgroup_id  = module.nimbus_network.secgroup.id
  keypair_name = aws_key_pair.jakubgs.key_name
}

resource "cloudflare_record" "nimbus_log_store" {
  zone_id = local.zones["status.im"]
  name    = "nimbus-es"
  value   = module.nimbus_log_store.public_ips[count.index]
  count   = var.log_stores_count
  type    = "A"
  proxied = true
}
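(Because this record uses count with a single name, one nimbus-es A record was created per log-store host, effectively giving DNS round-robin across the ElasticSearch nodes behind 'nimbus-es.status.im'.)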