Update telemetry to ES 8.2.0 and simplify config a bit

Alex 2022-05-04 16:27:46 +02:00
parent 1b4f96ffb2
commit 9cae8c8fc2
Signed by: lx
GPG key ID: 0E496D15096376BE
5 changed files with 30 additions and 46 deletions


@@ -8,8 +8,8 @@ output.elasticsearch:
   # In case you specify and additional path, the scheme is required: `http://localhost:9200/path`.
   # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
   hosts: ["localhost:9200"]
-  username: "apm"
-  password: "{{ key "secrets/telemetry/elastic_passwords/apm" }}"
+  username: "elastic"
+  password: "{{ key "secrets/telemetry/elastic_passwords/elastic" }}"
 instrumentation:
   enabled: true
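apm-server now authenticates as the built-in `elastic` user instead of a dedicated `apm` user. A quick, optional way to confirm that the stored password actually works against the endpoint configured above (assuming `consul` CLI access and Elasticsearch on `localhost:9200`, as in the config):

```bash
# fetch the password the template above interpolates, then authenticate as elastic
ES_PW="$(consul kv get secrets/telemetry/elastic_passwords/elastic)"
curl -fsS -u "elastic:${ES_PW}" http://localhost:9200/_security/_authenticate
```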


@@ -5,13 +5,13 @@ datasources:
     type: elasticsearch
     access: proxy
     url: http://localhost:9200
-    password: '{{ key "secrets/telemetry/elastic_passwords/grafana" }}'
-    user: 'grafana'
-    database: apm-*
+    password: '{{ key "secrets/telemetry/elastic_passwords/elastic" }}'
+    user: 'elastic'
+    database: metrics-*
     basicAuth: false
     isDefault: true
     jsonData:
-      esVersion: "7.10.0"
+      esVersion: "8.2.0"
       includeFrozen: false
       logLevelField: ''
       logMessageField: ''
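The Grafana datasource now targets Elasticsearch 8.2.0 as the `elastic` user and queries `metrics-*` instead of `apm-*`. A rough sanity check, under the same assumptions as above (`consul` CLI access, ES on `localhost:9200`), that the new index pattern actually holds documents:

```bash
# count documents matching the index pattern Grafana will query
ES_PW="$(consul kv get secrets/telemetry/elastic_passwords/elastic)"
curl -fsS -u "elastic:${ES_PW}" "http://localhost:9200/metrics-*/_count"
```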


@ -15,10 +15,11 @@ job "telemetry-system" {
task "elastic" { task "elastic" {
driver = "docker" driver = "docker"
config { config {
image = "docker.elastic.co/elasticsearch/elasticsearch:7.17.0" image = "docker.elastic.co/elasticsearch/elasticsearch:8.2.0"
network_mode = "host" network_mode = "host"
volumes = [ volumes = [
"/mnt/ssd/telemetry/es_data:/usr/share/elasticsearch/data", "/mnt/ssd/telemetry/es_data:/usr/share/elasticsearch/data",
"secrets/elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12",
] ]
ports = [ "elastic", "elastic_internal" ] ports = [ "elastic", "elastic_internal" ]
sysctl = { sysctl = {
@ -29,11 +30,18 @@ job "telemetry-system" {
} }
} }
user = "1000"
resources { resources {
memory = 1500 memory = 1500
cpu = 500 cpu = 500
} }
template {
data = "{{ key \"secrets/telemetry/elasticsearch/elastic-certificates.p12\" }}"
destination = "secrets/elastic-certificates.p12"
}
template { template {
data = <<EOH data = <<EOH
node.name={{ env "attr.unique.hostname" }} node.name={{ env "attr.unique.hostname" }}
@@ -48,8 +56,8 @@ xpack.security.authc.api_key.enabled=true
 xpack.security.transport.ssl.enabled=true
 xpack.security.transport.ssl.verification_mode=certificate
 xpack.security.transport.ssl.client_authentication=required
-xpack.security.transport.ssl.keystore.path=/usr/share/elasticsearch/data/elastic-certificates.p12
-xpack.security.transport.ssl.truststore.path=/usr/share/elasticsearch/data/elastic-certificates.p12
+xpack.security.transport.ssl.keystore.path=/usr/share/elasticsearch/config/elastic-certificates.p12
+xpack.security.transport.ssl.truststore.path=/usr/share/elasticsearch/config/elastic-certificates.p12
 cluster.routing.allocation.disk.watermark.high=75%
 cluster.routing.allocation.disk.watermark.low=65%
 ES_JAVA_OPTS=-Xms512M -Xmx512M
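The transport keystore is no longer read from the data volume: the new `template` stanza above renders it from Consul KV into the task's `secrets/` directory, which the new volume entry bind-mounts at `/usr/share/elasticsearch/config/elastic-certificates.p12`. One possible way to seed that key is sketched below; the `@file` form of `consul kv put` reads the value from disk, and since the `.p12` keystore is binary you should verify it round-trips through your Consul and Nomad versions (or base64-encode it and adjust the template accordingly):

```bash
# run from the directory where elasticsearch-certutil wrote the keystores
consul kv put secrets/telemetry/elasticsearch/elastic-certificates.p12 @elastic-certificates.p12
consul kv put secrets/telemetry/elasticsearch/elastic-stack-ca.p12 @elastic-stack-ca.p12
```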
@@ -101,7 +109,7 @@ EOH
     task "apm" {
      driver = "docker"
      config {
-       image = "docker.elastic.co/apm/apm-server:7.17.1"
+       image = "docker.elastic.co/apm/apm-server:8.2.0"
        network_mode = "host"
        ports = [ "apm" ]
        args = [ "--strict.perms=false" ]
@@ -144,7 +152,7 @@ EOH
     task "filebeat" {
      driver = "docker"
      config {
-       image = "docker.elastic.co/beats/filebeat:7.17.1"
+       image = "docker.elastic.co/beats/filebeat:8.2.0"
        network_mode = "host"
        volumes = [
          "/mnt/ssd/telemetry/filebeat:/usr/share/filebeat/data",


@ -14,7 +14,7 @@ job "telemetry" {
task "kibana" { task "kibana" {
driver = "docker" driver = "docker"
config { config {
image = "docker.elastic.co/kibana/kibana:7.17.0" image = "docker.elastic.co/kibana/kibana:8.2.0"
network_mode = "host" network_mode = "host"
ports = [ "kibana" ] ports = [ "kibana" ]
} }
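With all images now on 8.2.0, a minimal smoke test that Kibana started and can reach Elasticsearch, assuming the `kibana` port is the default 5601 and reusing the `elastic` credentials from Consul:

```bash
# Kibana status API; should return status JSON once Kibana has connected to ES
ES_PW="$(consul kv get secrets/telemetry/elastic_passwords/elastic)"
curl -fsS -u "elastic:${ES_PW}" http://localhost:5601/api/status
```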


@@ -1,13 +1,6 @@
-# create elasticsearch folders on all nodes
-```bash
-mkdir -p /mnt/ssd/telemetry/es_data/nodes
-chown 1000 /mnt/ssd/telemetry/es_data/nodes
-```
 # generate ca and tls certs for elasticsearch cluster
-start a `bash` in an elasticsearch image, such as `docker.elastic.co/elasticsearch/elasticsearch:7.17.0`: `docker run -ti docker.elastic.co/elasticsearch/elasticsearch:7.17.0 bash`
+start a `bash` in an elasticsearch image, such as `docker.elastic.co/elasticsearch/elasticsearch:8.2.0`: `docker run -ti docker.elastic.co/elasticsearch/elasticsearch:8.2.0 bash`
 generate a ca and node certs:
@@ -16,46 +9,29 @@ generate a ca and node certs:
 ./bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12
 ```
-copy `elastic-certificates.p12` to `/mnt/ssd/telemetry/es_data` in all nodes, and chown it:
+write these files in Consul at `secrets/telemetry/elasticsearch/elastic-certificates.p12` and `secrets/telemetry/elasticsearch/elastic-stack-ca.p12`
+# start nomad services
 ```bash
-chown 1000 /mnt/ssd/telemetry/es_data/elastic-certificates.p12
+nomad run telemetry-system.hcl
+nomad run telemetry.hcl
 ```
 # create elasticsearch passwords
-in elasticsearch container
+in an elasticsearch container that was launched by Nomad:
 ```bash
-./bin/elasticsearch-setup-passwords auto
+./bin/elasticsearch-reset-password -u elastic
+./bin/elasticsearch-reset-password -u kibana
 ```
 save passwords in consul, at:
-- `secrets/telemetry/elastic_passwords/apm_system` for user `apm_system`
 - `secrets/telemetry/elastic_passwords/kibana_system` for user `kibana_system`
 - `secrets/telemetry/elastic_passwords/elastic` for user `elastic`
 check kibana works, login to kibana with user `elastic`
-# create role and user for apm
-create role `apm_writer`, give privileges:
-- cluster privileges `manage_ilm`, `read_ilm`, `manage_ingest_pipelines`, `manage_index_templates`
-- on index `apm-*` privileges `create_doc`, `create_index`, `view_index_metadata`, `manage`
-- on index `apm-*sourcemap` privilege `read_cross_cluster`
-create user `apm` with roles `apm_writer` and `apm_system`. give it a randomly generated password that you save in `secrets/telemetry/elastic_passwords/apm`
-check apm data is ingested correctly (visible in kibana)
-# create role and user for grafana
-create role `grafana`, give privileges:
-- on index `apm-*` privileges `read` and `view_index_metadata`
-create user `grafana` with role `grafana`. give it a randomly generated password that you save in `secrets/telemetry/elastic_passwords/grafana`
-check grafana works
+grafana and apm-server will use the elastic user (the admin) to write data to elasticsearch
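For the password step, a possible non-interactive variant of the README commands, run against the `elastic` task that Nomad scheduled. It addresses the Kibana service user as `kibana_system` to match the Consul path; the `-s`/`-b` flags on `elasticsearch-reset-password`, the relative `bin/` path, and the alloc/task names are assumptions to verify against your setup:

```bash
# find the allocation running the "elastic" task, then reset passwords inside it
ALLOC_ID='<allocation id of the elastic task>'   # e.g. from: nomad job status telemetry-system
nomad alloc exec -task elastic "$ALLOC_ID" \
  bin/elasticsearch-reset-password -u elastic -s -b
nomad alloc exec -task elastic "$ALLOC_ID" \
  bin/elasticsearch-reset-password -u kibana_system -s -b
# store the printed passwords at the paths the configs and templates read from
consul kv put secrets/telemetry/elastic_passwords/elastic '<password printed for elastic>'
consul kv put secrets/telemetry/elastic_passwords/kibana_system '<password printed for kibana_system>'
```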