mirror of
https://github.com/status-im/infra-nimbus.git
synced 2025-01-21 19:20:46 +00:00
Jakub Sokołowski
c474a3311d
https://github.com/status-im/infra-nimbus/issues/96 "The moral of the story is this: even when you have memory to spare, try to avoid crossing the 32 GB heap boundary. It wastes memory, reduces CPU performance, and makes the GC struggle with large heaps." https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html#compressed_oops https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html#_give_less_than_half_your_memory_to_lucene Signed-off-by: Jakub Sokołowski <jakub@status.im>
73 lines
2.4 KiB
YAML
73 lines
2.4 KiB
YAML
---
# CloudFlare Origin certificates
# Certificate and key bodies are pulled from BitWarden at deploy time.
origin_certs:
  - domain: 'status.im'
    crt: '{{ lookup("bitwarden", "Cloudflare/status.im", file="origin.crt") }}'
    key: '{{ lookup("bitwarden", "Cloudflare/status.im", file="origin.key") }}'
    default: true
# Syncing can use a lot of memory
swap_file_path: '/main.swap'
swap_file_size_mb: 2048
# ElasticSearch Cluster
es_service_name: 'elasticsearch'
es_service_path: '/docker/{{ es_service_name }}'
es_cluster_name: 'nimbus-logs-search'
es_docker_network_name: '{{ es_service_name }}'
# 9200 is the REST API, 9300 the inter-node transport port.
es_api_port: 9200
es_node_port: 9300
# Container image tag; quoted so YAML does not mangle the version string.
es_node_cont_tag: '7.17.4'
# Port of the Prometheus exporter container.
es_exp_cont_port: 9114
# Master-eligible nodes: one {name, addr, port} map per host in the play,
# resolved from each host's facts via json_query (community.general).
# NOTE(review): assumes ansible_local.wireguard.vpn_ip is provided by a
# local fact on every play host — confirm against the wireguard role.
es_master_nodes: |
  {{ ansible_play_hosts
     | map('extract', hostvars)
     | list
     | json_query(
         '[].{
           name: hostname,
           addr: ansible_local.wireguard.vpn_ip,
           port: to_string(es_node_port)
         }') }}
# Since Logstash stores 1 index per day this is fine.
# See: https://www.elastic.co/blog/how-many-shards-should-i-have-in-my-elasticsearch-cluster
es_number_of_shards: 3
# Since Nimbus logs are low-value we don't need replicas.
es_number_of_replicas: 0
# JVM memory settings
# Keep the heap strictly below the ~32 GB compressed-oops cutoff: a heap of
# exactly 32g disables compressed object pointers, wasting memory and
# degrading GC performance. Min and max are equal to avoid heap resizing.
# See: https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html#compressed_oops
es_jvm_min_heap: '31g'
es_jvm_max_heap: '31g'
# ElasticSearch HQ management UI
es_hq_cont_enabled: true
es_hq_cont_port: 5000
# oauth access
|
|
oauth_service_name: '{{ es_hq_cont_name }}-oauth'
|
|
oauth_service_path: '{{ es_service_path }}/oauth'
|
|
oauth_domain: 'nimbus-es.infra.status.im'
|
|
oauth_cont_networks: ['{{ es_docker_network_name }}']
|
|
oauth_upstream_addr: 'hq'
|
|
oauth_upstream_port: '{{ es_hq_cont_port }}'
|
|
oauth_local_port: 4180
|
|
oauth_provider: 'github'
|
|
oauth_id: '{{ lookup("bitwarden", "nimbus/elastic-hq/oauth", field="client-id") }}'
|
|
oauth_secret: '{{ lookup("bitwarden", "nimbus/elastic-hq/oauth", field="secret") }}'
|
|
oauth_cookie_secret: '{{ lookup("bitwarden", "nimbus/elastic-hq/oauth", field="cookie-secret") }}'
|
|
|
# Open Ports
open_ports_default_comment: 'ElasticSearch'
open_ports_default_chain: 'VPN'
open_ports_list:
  # HTTP/HTTPS exposed via the SERVICES chain.
  - { port: 80, chain: 'SERVICES' }
  - { port: 443, chain: 'SERVICES' }
  # REST API restricted to known host ipsets.
  - { port: '{{ es_api_port }}', ipset: 'logs.nimbus' }
  - { port: '{{ es_api_port }}', ipset: 'dash.nimbus' }
  - { port: '{{ es_api_port }}', ipset: 'log-aggr.hq' }
  # Inter-node transport between cluster members.
  - { port: '{{ es_node_port }}', ipset: 'logs.nimbus' }
  - { port: '{{ es_node_port }}', ipset: 'dash.nimbus' }
  # Metrics exporter and local oauth proxy.
  - { port: '{{ es_exp_cont_port }}', ipset: 'metrics.hq' }
  - { port: '{{ oauth_local_port }}', ipset: 'proxy.misc' }