Elasticsearch
Checking Logstash logs
$ ssh NODE_NAME.NODE_DOMAIN
...
Last login: Wed Apr 7 13:59:53 2021 from 10.250.18.236
$ sudo -i
root@NODE_NAME:~#
root@NODE_NAME:~# . .bashrc
root@NODE_NAME:~# export NOMAD_NAMESPACE=infra
root@NODE_NAME:~# printenv | grep -i nomad
NOMAD_CACERT=/etc/nomad.d/ssl/nomad-ca.pem
NOMAD_CLIENT_CERT=/etc/nomad.d/ssl/cli.pem
NOMAD_CLIENT_KEY=/etc/nomad.d/ssl/cli-key.pem
NOMAD_ADDR=https://localhost:4646
NOMAD_NAMESPACE=infra
NOMAD_TOKEN=MASKED
root@NODE_NAME:~# nomad status
ID Type Priority Status Submit Date
infra-apps-proxy-pam service 50 running 2021-03-18T18:12:24+01:00
infra-ca service 50 running 2021-02-25T15:56:15+01:00
infra-cadvisor system 50 running 2021-03-12T14:55:40+01:00
infra-cluster-broccoli service 50 running 2021-03-29T00:03:13+02:00
infra-dsp-production-curator batch/periodic 50 running 2020-10-01T14:49:37+02:00
infra-dsp-production-curator/periodic-1617799800 batch 50 dead 2021-04-07T14:50:00+02:00
infra-dsp-production-elasticsearch service 50 running 2021-02-08T18:27:54+01:00
infra-dsp-production-kibana service 50 running 2020-08-24T09:56:05+02:00
infra-dsp-production-logstash service 50 running 2021-02-09T08:48:06+01:00
infra-ldap-exporter system 50 running 2021-03-12T14:55:37+01:00
infra-node-exporter system 50 running 2021-02-24T10:32:36+01:00
infra-prometheus service 50 running 2021-02-19T08:32:32+01:00
infra-security-iam service 50 running 2020-12-15T10:50:18+01:00
infra-ssh-key-distribution service 50 running 2020-12-15T13:33:55+01:00
root@NODE_NAME:~# nomad job status infra-dsp-production-logstash
ID = infra-dsp-production-logstash
Name = infra-dsp-production-logstash
Submit Date = 2021-02-09T08:48:06+01:00
Type = service
Priority = 50
Datacenters = dc1
Status = running
Periodic = false
Parameterized = false
Summary
Task Group Queued Starting Running Failed Complete Lost
infra-dsp-production-logstash 0 0 1 0 0 0
Allocations
ID Node ID Task Group Version Desired Status Created Modified
34b2a613 e0aca27a infra-dsp-production-logstash 7 run running 1mo27d ago 21m2s ago
root@NODE_NAME:~# nomad alloc logs -tail 34b2a613
[INFO ] 2021-04-07 12:34:33.799 [[main]>worker0] elasticsearch - retrying failed action with response code: 403 ({"type"=>"cluster_block_exception", "reason"=>"blocked by: [FORBIDDEN/12/index read-only / allow delete (api)];"})
[INFO ] 2021-04-07 12:34:33.799 [[main]>worker0] elasticsearch - retrying failed action with response code: 403 ({"type"=>"cluster_block_exception", "reason"=>"blocked by: [FORBIDDEN/12/index read-only / allow delete (api)];"})
[INFO ] 2021-04-07 12:34:33.799 [[main]>worker0] elasticsearch - retrying failed action with response code: 403 ({"type"=>"cluster_block_exception", "reason"=>"blocked by: [FORBIDDEN/12/index read-only / allow delete (api)];"})
[INFO ] 2021-04-07 12:34:33.800 [[main]>worker0] elasticsearch - Retrying individual bulk actions that failed or were rejected by the previous bulk request. {:count=>23}
Releasing read-only indices
After we make sure that there is enough space on VMs:
root@NODE_NAME:~# df -h /elastic/
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 985G 576G 359G 62% /
We can change the indices to be writeable again:
# ES_PWD is in infra corp vault
root@NODE_NAME:~# curl -u "elasticsearch_admin:$ES_PWD" -XPUT "https://infra-dsp-production-elasticsearch.service.NODE_DOMAIN:1704/_all/_settings?pretty" -H 'Content-Type: application/json' -d'
{
"index.blocks.read_only_allow_delete": null
}
'
Troubleshooting ES from kibana
Log into Kibana as elasticsearch_admin. The password is in the Corp Vault.
Disk usage from ES
GET _cat/allocation?v
Sorted indices' size
GET _cat/indices?v&s=store.size
Get cluster settings
GET /_cluster/settings
Get settings of all indices
GET /_all/_settings
Get shards sorted by size
GET _cat/shards?v&s=store
Get health using curl
root@NODE_NAME:~# curl -u "elasticsearch_admin:$ES_PWD" -XGET "https://infra-dsp-staging-elasticsearch.service.staging.NODE_DOMAIN:1704/_cat/health?pretty"
Creating a password hash for secure guard
On a node (e.g.: dsp-infra-06 for prod or NODE_DOMAIN for staging) with elasticsearch execute bash in the elasticsearch container:
ubuntu@NODE_NAME:~$ sudo docker ps | grep elasticsearch
ee8808488fea internal.docker.NODE_DOMAIN/elasticsearch-sg-dsp:6.7.1 "/usr/local/bin/dock…" 3 months ago Up 3 months infra-dsp-staging-elasticsearch-1187a5f8-4e80-7e45-4d4a-bf61043f89c0
ubuntu@NODE_NAME:~$ sudo docker exec -it infra-dsp-staging-elasticsearch-1187a5f8-4e80-7e45-4d4a-bf61043f89c0 /bin/bash
[root@infra-dsp-staging-elasticsearch elasticsearch]#
In the container run:
[root@infra-dsp-production-elasticsearch elasticsearch]# /usr/share/elasticsearch/plugins/search-guard-6/tools/hash.sh
[Password:]
After the pipeline is successfully executed, we need to trigger the import of SG credentials. In elasticsearch container we run the following:
Be careful with the ip address of the elasticsearch for which you are triggering the import.
/usr/share/elasticsearch/plugins/search-guard-6/tools/sgadmin.sh -cert /usr/share/elasticsearch/config/sg/sgadmin.cert -cacert /usr/share/elasticsearch/config/sg/elasticsearch-ca.pem -h 10.250.18.49 -key /usr/share/elasticsearch/config/sg/sgadmin.key -icl -cd /usr/share/elasticsearch/config/sg