elastic / helm-charts


[elasticsearch] Pod keeps restarting because of Readiness exit 1 #361

Closed: holms closed this issue 4 years ago

holms commented 5 years ago

Chart version:

elasticsearch-7.4.1

Kubernetes version:

1.15.5-do.0

Kubernetes provider:

Digital Ocean K8s

Helm Version:

Not sure; whichever version the Terraform Helm provider uses.

helm get release output

REVISION: 1
RELEASED: Wed Nov  6 17:17:35 2019
CHART: elasticsearch-7.4.1
USER-SUPPLIED VALUES:
esJavaOpts: -Xmx1g -Xms1g
extraInitContainers: |
  - name: create
    image: busybox:1.28
    command: ['mkdir', '-p', '/usr/share/elasticsearch/data/nodes/']
    securityContext:
      runAsUser: 0
    volumeMounts:
     - mountPath: /usr/share/elasticsearch/data
       name: elasticsearch-master
  - name: file-permissions
    image: busybox:1.28
    command: ['chown', '-R', '1000:1000', '/usr/share/elasticsearch/']
    securityContext:
       runAsUser: 0
    volumeMounts:
     - mountPath: /usr/share/elasticsearch/data
       name: elasticsearch-master
nodeSelector:
  doks.digitalocean.com/node-pool: elasticsearch
readinessProbe:
  initialDelaySeconds: 200
resources:
  limits:
    cpu: 1000m
    memory: 2G
  requests:
    cpu: 100m
    memory: 2G
volumeClaimTemplate:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20G
  storageClassName: do-block-storage

COMPUTED VALUES:
antiAffinity: hard
antiAffinityTopologyKey: kubernetes.io/hostname
clusterHealthCheckParams: wait_for_status=green&timeout=1s
clusterName: elasticsearch
esConfig: {}
esJavaOpts: -Xmx1g -Xms1g
esMajorVersion: ""
extraEnvs: []
extraInitContainers: |
  - name: create
    image: busybox:1.28
    command: ['mkdir', '-p', '/usr/share/elasticsearch/data/nodes/']
    securityContext:
      runAsUser: 0
    volumeMounts:
     - mountPath: /usr/share/elasticsearch/data
       name: elasticsearch-master
  - name: file-permissions
    image: busybox:1.28
    command: ['chown', '-R', '1000:1000', '/usr/share/elasticsearch/']
    securityContext:
       runAsUser: 0
    volumeMounts:
     - mountPath: /usr/share/elasticsearch/data
       name: elasticsearch-master
extraVolumeMounts: ""
extraVolumes: ""
fsGroup: ""
fullnameOverride: ""
httpPort: 9200
image: docker.elastic.co/elasticsearch/elasticsearch
imagePullPolicy: IfNotPresent
imagePullSecrets: []
imageTag: 7.4.1
ingress:
  annotations: {}
  enabled: false
  hosts:
  - chart-example.local
  path: /
  tls: []
initResources: {}
keystore: []
labels: {}
lifecycle: {}
masterService: ""
masterTerminationFix: false
maxUnavailable: 1
minimumMasterNodes: 2
nameOverride: ""
networkHost: 0.0.0.0
nodeAffinity: {}
nodeGroup: master
nodeSelector:
  doks.digitalocean.com/node-pool: elasticsearch
persistence:
  annotations: {}
  enabled: true
podAnnotations: {}
podManagementPolicy: Parallel
podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000
podSecurityPolicy:
  create: false
  name: ""
  spec:
    fsGroup:
      rule: RunAsAny
    privileged: true
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
    - secret
    - configMap
    - persistentVolumeClaim
priorityClassName: ""
protocol: http
rbac:
  create: false
  serviceAccountName: ""
readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 200
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5
replicas: 3
resources:
  limits:
    cpu: 1000m
    memory: 2G
  requests:
    cpu: 100m
    memory: 2G
roles:
  data: "true"
  ingest: "true"
  master: "true"
schedulerName: ""
secretMounts: []
securityContext:
  capabilities:
    drop:
    - ALL
  runAsNonRoot: true
  runAsUser: 1000
service:
  annotations: {}
  httpPortName: http
  nodePort: ""
  transportPortName: transport
  type: ClusterIP
sidecarResources: {}
sysctlInitContainer:
  enabled: true
sysctlVmMaxMapCount: 262144
terminationGracePeriod: 120
tolerations: []
transportPort: 9300
updateStrategy: RollingUpdate
volumeClaimTemplate:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20G
  storageClassName: do-block-storage

HOOKS:
---
# elasticsearch-tncit-test
apiVersion: v1
kind: Pod
metadata:
  name: "elasticsearch-tncit-test"
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
  - name: "elasticsearch-myxea-test"
    image: "docker.elastic.co/elasticsearch/elasticsearch:7.4.1"
    command:
      - "sh"
      - "-c"
      - |
        #!/usr/bin/env bash -e
        curl -XGET --fail 'elasticsearch-master:9200/_cluster/health?wait_for_status=green&timeout=1s'
  restartPolicy: Never
MANIFEST:

---
# Source: elasticsearch/templates/poddisruptionbudget.yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: "elasticsearch-master-pdb"
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app: "elasticsearch-master"
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch-master
  labels:
    heritage: "Tiller"
    release: "elasticsearch"
    chart: "elasticsearch"
    app: "elasticsearch-master"
  annotations:
    {}

spec:
  type: ClusterIP
  selector:
    heritage: "Tiller"
    release: "elasticsearch"
    chart: "elasticsearch"
    app: "elasticsearch-master"
  ports:
  - name: http
    protocol: TCP
    port: 9200
  - name: transport
    protocol: TCP
    port: 9300
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch-master-headless
  labels:
    heritage: "Tiller"
    release: "elasticsearch"
    chart: "elasticsearch"
    app: "elasticsearch-master"
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve
  # Create endpoints also if the related pod isn't ready
  publishNotReadyAddresses: true
  selector:
    app: "elasticsearch-master"
  ports:
  - name: http
    port: 9200
  - name: transport
    port: 9300
---
# Source: elasticsearch/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-master
  labels:
    heritage: "Tiller"
    release: "elasticsearch"
    chart: "elasticsearch"
    app: "elasticsearch-master"
  annotations:
    esMajorVersion: "7"
spec:
  serviceName: elasticsearch-master-headless
  selector:
    matchLabels:
      app: "elasticsearch-master"
  replicas: 3
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
  volumeClaimTemplates:
  - metadata:
      name: elasticsearch-master
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 20G
      storageClassName: do-block-storage

  template:
    metadata:
      name: "elasticsearch-master"
      labels:
        heritage: "Tiller"
        release: "elasticsearch"
        chart: "elasticsearch"
        app: "elasticsearch-master"
      annotations:

    spec:
      securityContext:
        fsGroup: 1000
        runAsUser: 1000

      nodeSelector:
        doks.digitalocean.com/node-pool: elasticsearch

      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - "elasticsearch-master"
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 120
      volumes:
      initContainers:
      - name: configure-sysctl
        securityContext:
          runAsUser: 0
          privileged: true
        image: "docker.elastic.co/elasticsearch/elasticsearch:7.4.1"
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        resources:
          {}

      - name: create
        image: busybox:1.28
        command: ['mkdir', '-p', '/usr/share/elasticsearch/data/nodes/']
        securityContext:
          runAsUser: 0
        volumeMounts:
         - mountPath: /usr/share/elasticsearch/data
           name: elasticsearch-master
      - name: file-permissions
        image: busybox:1.28
        command: ['chown', '-R', '1000:1000', '/usr/share/elasticsearch/']
        securityContext:
           runAsUser: 0
        volumeMounts:
         - mountPath: /usr/share/elasticsearch/data
           name: elasticsearch-master

      containers:
      - name: "elasticsearch"
        securityContext:
          capabilities:
            drop:
            - ALL
          runAsNonRoot: true
          runAsUser: 1000

        image: "docker.elastic.co/elasticsearch/elasticsearch:7.4.1"
        imagePullPolicy: "IfNotPresent"
        readinessProbe:
          failureThreshold: 3
          initialDelaySeconds: 200
          periodSeconds: 10
          successThreshold: 3
          timeoutSeconds: 5

          exec:
            command:
              - sh
              - -c
              - |
                #!/usr/bin/env bash -e
                # If the node is starting up wait for the cluster to be ready (request params: 'wait_for_status=green&timeout=1s' )
                # Once it has started only check that the node itself is responding
                START_FILE=/tmp/.es_start_file

                http () {
                    local path="${1}"
                    if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then
                      BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}"
                    else
                      BASIC_AUTH=''
                    fi
                    curl -XGET -s -k --fail ${BASIC_AUTH} http://127.0.0.1:9200${path}
                }

                if [ -f "${START_FILE}" ]; then
                    echo 'Elasticsearch is already running, let'\''s check the node is healthy'
                    http "/"
                else
                    echo 'Waiting for elasticsearch cluster to become ready (request params: "wait_for_status=green&timeout=1s" )'
                    if http "/_cluster/health?wait_for_status=green&timeout=1s" ; then
                        touch ${START_FILE}
                        exit 0
                    else
                        echo 'Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )'
                        exit 1
                    fi
                fi
        ports:
        - name: http
          containerPort: 9200
        - name: transport
          containerPort: 9300
        resources:
          limits:
            cpu: 1000m
            memory: 2G
          requests:
            cpu: 100m
            memory: 2G

        env:
          - name: node.name
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: cluster.initial_master_nodes
            value: "elasticsearch-master-0,elasticsearch-master-1,elasticsearch-master-2,"
          - name: discovery.seed_hosts
            value: "elasticsearch-master-headless"
          - name: cluster.name
            value: "elasticsearch"
          - name: network.host
            value: "0.0.0.0"
          - name: ES_JAVA_OPTS
            value: "-Xmx1g -Xms1g"
          - name: node.data
            value: "true"
          - name: node.ingest
            value: "true"
          - name: node.master
            value: "true"
        volumeMounts:
          - name: "elasticsearch-master"
            mountPath: /usr/share/elasticsearch/data

Describe the bug:

When the pod starts, I get an error that the readiness probe failed, and then the pod exits. This seems to be because your template contains exit 1 when the readiness check fails, but why? This forces the pod to restart, so readiness never succeeds. The only way I could fix this was to increase the readiness initial delay to 200 seconds.
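
Isolated as a minimal values.yaml fragment, that workaround is just the probe override already visible in the user-supplied values above:

readinessProbe:
  initialDelaySeconds: 200  # chart default is 10; give Elasticsearch more startup time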

Steps to reproduce:

  1. Deploy the chart with any config from the examples
  2. Check the pod's logs, which show the readiness probe failing over and over (see the commands after this list)
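
To see the failure from the Kubernetes side, generic kubectl usage is enough (these commands are illustrative, not taken from the thread; the namespace matches the node listing further down):

kubectl -n elasticsearch describe pod elasticsearch-master-0   # Events should list "Readiness probe failed" with the probe's output
kubectl -n elasticsearch logs elasticsearch-master-0           # Elasticsearch startup logs, like those pasted below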

Expected behavior:

AFAIK there's no need for exit 1 in the readiness probe; the pod should just stay up and the probe should retry after some period of time.

Provide logs and/or server output (if relevant):

OpenJDK 64-Bit Server VM warning: Option UseConcMarkSweepGC was deprecated in version 9.0 and will likely be removed in a future release.
{"type": "server", "timestamp": "2019-11-06T14:53:04,902Z", "level": "INFO", "component": "o.e.e.NodeEnvironment", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "using [1] data paths, mounts [[/usr/share/elasticsearch/data (/dev/disk/by-id/scsi-0DO_Volume_pvc-4d379d22-643b-49d4-becb-ab966508919e)]], net usable_space [16.6gb], net total_space [17.5gb], types [ext4]" }
{"type": "server", "timestamp": "2019-11-06T14:53:04,906Z", "level": "INFO", "component": "o.e.e.NodeEnvironment", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "heap size [1015.6mb], compressed ordinary object pointers [true]" }
{"type": "server", "timestamp": "2019-11-06T14:53:04,910Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "node name [elasticsearch-master-0], node ID [Gi4HuSwOSxmwVip1J0jnoQ], cluster name [elasticsearch]" }
{"type": "server", "timestamp": "2019-11-06T14:53:04,912Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "version[7.4.1], pid[1], build[default/docker/fc0eeb6e2c25915d63d871d344e3d0b45ea0ea1e/2019-10-22T17:16:35.176724Z], OS[Linux/4.19.0-0.bpo.6-amd64/amd64], JVM[AdoptOpenJDK/OpenJDK 64-Bit Server VM/13/13+33]" }
{"type": "server", "timestamp": "2019-11-06T14:53:04,913Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "JVM home [/usr/share/elasticsearch/jdk]" }
{"type": "server", "timestamp": "2019-11-06T14:53:04,913Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "JVM arguments [-Xms1g, -Xmx1g, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -Des.networkaddress.cache.ttl=60, -Des.networkaddress.cache.negative.ttl=10, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dio.netty.allocator.numDirectArenas=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Djava.io.tmpdir=/tmp/elasticsearch-8666035286165417967, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -Djava.locale.providers=COMPAT, -Des.cgroups.hierarchy.override=/, -Xmx1g, -Xms1g, -Dio.netty.allocator.type=unpooled, -XX:MaxDirectMemorySize=536870912, -Des.path.home=/usr/share/elasticsearch, -Des.path.conf=/usr/share/elasticsearch/config, -Des.distribution.flavor=default, -Des.distribution.type=docker, -Des.bundled_jdk=true]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,298Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [aggs-matrix-stats]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,298Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [analysis-common]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,299Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [data-frame]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,299Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [flattened]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,299Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [frozen-indices]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,299Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [ingest-common]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,300Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [ingest-geoip]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,300Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [ingest-user-agent]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,300Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [lang-expression]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,300Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [lang-mustache]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,300Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [lang-painless]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,300Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [mapper-extras]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,301Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [parent-join]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,301Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [percolator]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,301Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [rank-eval]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,301Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [reindex]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,301Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [repository-url]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,302Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [search-business-rules]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,302Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [spatial]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,302Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [transport-netty4]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,302Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [vectors]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,302Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-analytics]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,303Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-ccr]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,303Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-core]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,303Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-deprecation]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,303Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-graph]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,304Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-ilm]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,304Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-logstash]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,304Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-ml]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,305Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-monitoring]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,305Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-rollup]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,305Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-security]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,305Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-sql]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,305Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-voting-only-node]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,306Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-watcher]" }
{"type": "server", "timestamp": "2019-11-06T14:53:10,306Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "no plugins loaded" }
{"type": "server", "timestamp": "2019-11-06T14:53:21,719Z", "level": "INFO", "component": "o.e.x.s.a.s.FileRolesStore", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "parsed [0] roles from file [/usr/share/elasticsearch/config/roles.yml]" }
{"type": "server", "timestamp": "2019-11-06T14:53:23,236Z", "level": "INFO", "component": "o.e.x.m.p.l.CppLogMessageHandler", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "[controller/87] [Main.cc@110] controller (64 bit): Version 7.4.1 (Build 973380bdacc5e8) Copyright (c) 2019 Elasticsearch BV" }
{"type": "server", "timestamp": "2019-11-06T14:53:24,639Z", "level": "DEBUG", "component": "o.e.a.ActionModule", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "Using REST wrapper from plugin org.elasticsearch.xpack.security.Security" }
{"type": "server", "timestamp": "2019-11-06T14:53:25,800Z", "level": "INFO", "component": "o.e.d.DiscoveryModule", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "using discovery type [zen] and seed hosts providers [settings]" }
{"type": "server", "timestamp": "2019-11-06T14:53:28,212Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "initialized" }
{"type": "server", "timestamp": "2019-11-06T14:53:28,213Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "starting ..." }
{"type": "server", "timestamp": "2019-11-06T14:53:28,542Z", "level": "INFO", "component": "o.e.t.TransportService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "publish_address {10.244.3.46:9300}, bound_addresses {[::]:9300}" }
{"type": "server", "timestamp": "2019-11-06T14:53:28,611Z", "level": "INFO", "component": "o.e.b.BootstrapChecks", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "bound or publishing to a non-loopback address, enforcing bootstrap checks" }
{"type": "server", "timestamp": "2019-11-06T14:53:28,947Z", "level": "INFO", "component": "o.e.c.c.Coordinator", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "setting initial configuration to VotingConfiguration{{bootstrap-placeholder}-elasticsearch-master-1,Gi4HuSwOSxmwVip1J0jnoQ,FntcS1PtQqmA07fuKdqf8Q}" }
{"type": "server", "timestamp": "2019-11-06T14:53:32,641Z", "level": "INFO", "component": "o.e.c.s.ClusterApplierService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "master node changed {previous [], current [{elasticsearch-master-1}{onfBHnjRSeWSvE07eqoIxg}{MjCRAJr7Rbu-1rkKqXPJXQ}{10.244.4.85}{10.244.4.85:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true}]}, added {{elasticsearch-master-1}{onfBHnjRSeWSvE07eqoIxg}{MjCRAJr7Rbu-1rkKqXPJXQ}{10.244.4.85}{10.244.4.85:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true},{elasticsearch-master-2}{FntcS1PtQqmA07fuKdqf8Q}{_3ildTSjSseZnoJHRiKW4w}{10.244.5.59}{10.244.5.59:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true},}, term: 1, version: 13, reason: ApplyCommitRequest{term=1, version=13, sourceNode={elasticsearch-master-1}{onfBHnjRSeWSvE07eqoIxg}{MjCRAJr7Rbu-1rkKqXPJXQ}{10.244.4.85}{10.244.4.85:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true}}" }
{"type": "server", "timestamp": "2019-11-06T14:53:32,720Z", "level": "INFO", "component": "o.e.x.s.a.TokenService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "refresh keys" }
{"type": "server", "timestamp": "2019-11-06T14:53:33,004Z", "level": "INFO", "component": "o.e.x.s.a.TokenService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "refreshed keys" }
{"type": "server", "timestamp": "2019-11-06T14:53:33,100Z", "level": "INFO", "component": "o.e.h.AbstractHttpServerTransport", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "publish_address {10.244.3.46:9200}, bound_addresses {[::]:9200}", "cluster.uuid": "5qGtycsKQ_mCV1VNl4Fyng", "node.id": "Gi4HuSwOSxmwVip1J0jnoQ"  }
{"type": "server", "timestamp": "2019-11-06T14:53:33,101Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "started", "cluster.uuid": "5qGtycsKQ_mCV1VNl4Fyng", "node.id": "Gi4HuSwOSxmwVip1J0jnoQ"  }
{"type": "server", "timestamp": "2019-11-06T14:53:33,329Z", "level": "INFO", "component": "o.e.x.m.e.l.LocalExporter", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "waiting for elected master node [{elasticsearch-master-1}{onfBHnjRSeWSvE07eqoIxg}{MjCRAJr7Rbu-1rkKqXPJXQ}{10.244.4.85}{10.244.4.85:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true}] to setup local exporter [default_local] (does it have x-pack installed?)", "cluster.uuid": "5qGtycsKQ_mCV1VNl4Fyng", "node.id": "Gi4HuSwOSxmwVip1J0jnoQ"  }
{"type": "server", "timestamp": "2019-11-06T14:53:33,563Z", "level": "INFO", "component": "o.e.x.m.e.l.LocalExporter", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "waiting for elected master node [{elasticsearch-master-1}{onfBHnjRSeWSvE07eqoIxg}{MjCRAJr7Rbu-1rkKqXPJXQ}{10.244.4.85}{10.244.4.85:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true}] to setup local exporter [default_local] (does it have x-pack installed?)", "cluster.uuid": "5qGtycsKQ_mCV1VNl4Fyng", "node.id": "Gi4HuSwOSxmwVip1J0jnoQ"  }
{"type": "server", "timestamp": "2019-11-06T14:53:33,899Z", "level": "INFO", "component": "o.e.x.m.e.l.LocalExporter", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "waiting for elected master node [{elasticsearch-master-1}{onfBHnjRSeWSvE07eqoIxg}{MjCRAJr7Rbu-1rkKqXPJXQ}{10.244.4.85}{10.244.4.85:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true}] to setup local exporter [default_local] (does it have x-pack installed?)", "cluster.uuid": "5qGtycsKQ_mCV1VNl4Fyng", "node.id": "Gi4HuSwOSxmwVip1J0jnoQ"  }
{"type": "server", "timestamp": "2019-11-06T14:53:34,109Z", "level": "INFO", "component": "o.e.l.LicenseService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "license [baaa7636-5068-489a-9caa-ded29f12af51] mode [basic] - valid", "cluster.uuid": "5qGtycsKQ_mCV1VNl4Fyng", "node.id": "Gi4HuSwOSxmwVip1J0jnoQ"  }
{"type": "server", "timestamp": "2019-11-06T14:53:34,111Z", "level": "INFO", "component": "o.e.x.s.s.SecurityStatusChangeListener", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "Active license is now [BASIC]; Security is disabled", "cluster.uuid": "5qGtycsKQ_mCV1VNl4Fyng", "node.id": "Gi4HuSwOSxmwVip1J0jnoQ"  }
{"type": "server", "timestamp": "2019-11-06T14:53:34,121Z", "level": "INFO", "component": "o.e.x.m.e.l.LocalExporter", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "waiting for elected master node [{elasticsearch-master-1}{onfBHnjRSeWSvE07eqoIxg}{MjCRAJr7Rbu-1rkKqXPJXQ}{10.244.4.85}{10.244.4.85:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true}] to setup local exporter [default_local] (does it have x-pack installed?)", "cluster.uuid": "5qGtycsKQ_mCV1VNl4Fyng", "node.id": "Gi4HuSwOSxmwVip1J0jnoQ"  }
{"type": "server", "timestamp": "2019-11-06T14:53:34,798Z", "level": "INFO", "component": "o.e.x.m.e.l.LocalExporter", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "waiting for elected master node [{elasticsearch-master-1}{onfBHnjRSeWSvE07eqoIxg}{MjCRAJr7Rbu-1rkKqXPJXQ}{10.244.4.85}{10.244.4.85:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true}] to setup local exporter [default_local] (does it have x-pack installed?)", "cluster.uuid": "5qGtycsKQ_mCV1VNl4Fyng", "node.id": "Gi4HuSwOSxmwVip1J0jnoQ"  }
fatmcgav commented 5 years ago

@holms Thank you for raising this issue.

From what I can see in the logs above, the Elasticsearch service is taking more than 30 seconds to start up, which is why the pod is failing the readinessProbe check.

Would you be able to provide the output from kubectl describe nodes so we can get an idea of the make-up of the nodes being used in this cluster?

WRT the behaviour of issuing an exit 1 at the end of the readinessProbe command; this is intentional, as this is how Kubernetes knows whether a pod is ready to serve traffic or not. Further details on the expected behaviour can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes
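
As a generic illustration of the mechanism (this is not the chart's template): for an exec readiness probe, the kubelet runs the command on every probe; exit 0 marks the container Ready, and any non-zero exit counts as a failure. After failureThreshold consecutive failures the pod is marked NotReady and removed from Service endpoints:

readinessProbe:
  exec:
    command:
      - sh
      - -c
      - curl -s --fail http://127.0.0.1:9200/   # non-zero exit means probe failure
  initialDelaySeconds: 10   # wait before the first probe
  periodSeconds: 10         # probe every 10 seconds
  failureThreshold: 3       # 3 consecutive failures marks the pod NotReady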

Combined with the rest of the readinessProbe configuration, which defines the initial delay before the first check (initialDelaySeconds), how often to check (periodSeconds), and how many consecutive failures are tolerated (failureThreshold), this means that with the default configuration, if the readinessProbe command still hasn't succeeded by the 3rd attempt (roughly 3 × 10 = ~30 seconds of pod lifetime), the pod will be terminated by Kubernetes and re-created.

In order to prevent this pod termination, there are a couple of options:

  1. Resolve whatever is causing Elasticsearch to take more than 30 seconds to become ready.
  2. Tweak the readinessProbe configuration to allow more time for Elasticsearch to become healthy: add a bigger initialDelaySeconds, allow more attempts by increasing failureThreshold, or check less frequently by increasing periodSeconds (see the sketch after this list).
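
A minimal values.yaml sketch of option 2; the numbers are illustrative, not a recommendation:

readinessProbe:
  initialDelaySeconds: 90   # wait longer before the first probe
  periodSeconds: 20         # probe less often
  failureThreshold: 6       # tolerate more consecutive failures
# with these values a pod has roughly 90 + 6 * 20 = 210 seconds before being considered failed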

It's worth adding that the default values shipped with this chart should be considered just that: defaults that may need to be tweaked to better fit the environment within which the chart is being deployed.

Let me know if any of the above isn't clear, or if we can assist you further.

holms commented 5 years ago

@fatmcgav

I've increased the ES node pool to 2 vCPU and 4 GB RAM; increasing the interval was still necessary.

Name:               elasticsearch-eqmo
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/instance-type=s-2vcpu-4gb
                    beta.kubernetes.io/os=linux
                    doks.digitalocean.com/node-id=9d6cc7f6-da95-423c-9219-e05b2aa813f2
                    doks.digitalocean.com/node-pool=elasticsearch
                    doks.digitalocean.com/node-pool-id=111efabc-7649-40eb-b06d-997f047fc6c7
                    doks.digitalocean.com/version=1.15.5-do.0
                    failure-domain.beta.kubernetes.io/region=fra1
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=elasticsearch-eqmo
                    kubernetes.io/os=linux
                    region=fra1
Annotations:        csi.volume.kubernetes.io/nodeid: {"dobs.csi.digitalocean.com":"165833085"}
                    io.cilium.network.ipv4-cilium-host: 10.244.5.1
                    io.cilium.network.ipv4-health-ip: 10.244.5.120
                    io.cilium.network.ipv4-pod-cidr: 10.244.5.0/24
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 06 Nov 2019 16:44:37 +0200
Taints:             <none>
Unschedulable:      false
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Wed, 06 Nov 2019 16:44:45 +0200   Wed, 06 Nov 2019 16:44:45 +0200   CiliumIsUp                   Cilium is running on this node
  MemoryPressure       False   Tue, 12 Nov 2019 01:45:13 +0200   Wed, 06 Nov 2019 16:44:36 +0200   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Tue, 12 Nov 2019 01:45:13 +0200   Wed, 06 Nov 2019 16:44:36 +0200   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Tue, 12 Nov 2019 01:45:13 +0200   Wed, 06 Nov 2019 16:44:36 +0200   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Tue, 12 Nov 2019 01:45:13 +0200   Wed, 06 Nov 2019 16:44:47 +0200   KubeletReady                 kubelet is posting ready status
Addresses:
  Hostname:    elasticsearch-eqmo
  InternalIP:  10.135.206.117
  ExternalIP:  134.209.232.251
Capacity:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            4041632Ki
 pods:                                              110
Allocatable:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            3110Mi
 pods:                                              110
System Info:
 Machine ID:                 50652cec04e3424893c0c2739ea3ba91
 System UUID:                50652cec-04e3-4248-93c0-c2739ea3ba91
 Boot ID:                    d01bca30-6cf4-4ccd-8e27-9d596d98ffb9
 Kernel Version:             4.19.0-0.bpo.6-amd64
 OS Image:                   Debian GNU/Linux 9 (stretch)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://18.9.2
 Kubelet Version:            v1.15.5
 Kube-Proxy Version:         v1.15.5
PodCIDR:                     10.244.5.0/24
ProviderID:                  digitalocean://165833085
Non-terminated Pods:         (6 in total)
  Namespace                  Name                       CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                       ------------  ----------  ---------------  -------------  ---
  default                    spotify-docker-gc-nbbsn    0 (0%)        0 (0%)      0 (0%)           0 (0%)         5d9h
  elasticsearch              elasticsearch-master-2     100m (5%)     1 (50%)     2G (61%)         2G (61%)       5d8h
  kube-system                cilium-w2nc5               300m (15%)    0 (0%)      300Mi (9%)       0 (0%)         5d9h
  kube-system                csi-do-node-mm7cn          0 (0%)        0 (0%)      70Mi (2%)        0 (0%)         5d9h
  kube-system                do-node-agent-s6mv8        102m (5%)     102m (5%)   80Mi (2%)        100Mi (3%)     5d9h
  kube-system                kube-proxy-6dnlj           0 (0%)        0 (0%)      125Mi (4%)       0 (0%)         5d9h
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource                                          Requests          Limits
  --------                                          --------          ------
  cpu                                               502m (25%)        1102m (55%)
  memory                                            2602931200 (79%)  2104857600 (64%)
  ephemeral-storage                                 0 (0%)            0 (0%)
  attachable-volumes-csi-dobs.csi.digitalocean.com  0                 0
Events:                                             <none>

Name:               elasticsearch-eqmx
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/instance-type=s-2vcpu-4gb
                    beta.kubernetes.io/os=linux
                    doks.digitalocean.com/node-id=1e0362f1-3315-4a7f-9b91-877669f8a5ca
                    doks.digitalocean.com/node-pool=elasticsearch
                    doks.digitalocean.com/node-pool-id=111efabc-7649-40eb-b06d-997f047fc6c7
                    doks.digitalocean.com/version=1.15.5-do.0
                    failure-domain.beta.kubernetes.io/region=fra1
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=elasticsearch-eqmx
                    kubernetes.io/os=linux
                    region=fra1
Annotations:        csi.volume.kubernetes.io/nodeid: {"dobs.csi.digitalocean.com":"165833086"}
                    io.cilium.network.ipv4-cilium-host: 10.244.4.1
                    io.cilium.network.ipv4-health-ip: 10.244.4.75
                    io.cilium.network.ipv4-pod-cidr: 10.244.4.0/24
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 06 Nov 2019 16:44:24 +0200
Taints:             <none>
Unschedulable:      false
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Thu, 07 Nov 2019 00:47:02 +0200   Thu, 07 Nov 2019 00:47:02 +0200   CiliumIsUp                   Cilium is running on this node
  MemoryPressure       False   Tue, 12 Nov 2019 01:45:32 +0200   Thu, 07 Nov 2019 00:46:21 +0200   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Tue, 12 Nov 2019 01:45:32 +0200   Thu, 07 Nov 2019 00:46:21 +0200   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Tue, 12 Nov 2019 01:45:32 +0200   Thu, 07 Nov 2019 00:46:21 +0200   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Tue, 12 Nov 2019 01:45:32 +0200   Thu, 07 Nov 2019 00:46:21 +0200   KubeletReady                 kubelet is posting ready status
Addresses:
  Hostname:    elasticsearch-eqmx
  InternalIP:  10.135.105.220
  ExternalIP:  167.172.164.54
Capacity:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            4041632Ki
 pods:                                              110
Allocatable:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            3184624Ki
 pods:                                              110
System Info:
 Machine ID:                 b2ecb62f130f434fa2bbdb9094f1cf0c
 System UUID:                8aa19372-bb34-4f97-b938-bf5c91639558
 Boot ID:                    4df3a84f-1f8e-463f-9e96-44594d0dfd38
 Kernel Version:             4.19.0-0.bpo.6-amd64
 OS Image:                   Debian GNU/Linux 9 (stretch)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://18.9.2
 Kubelet Version:            v1.15.5
 Kube-Proxy Version:         v1.15.5
PodCIDR:                     10.244.4.0/24
ProviderID:                  digitalocean://165833086
Non-terminated Pods:         (6 in total)
  Namespace                  Name                       CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                       ------------  ----------  ---------------  -------------  ---
  default                    spotify-docker-gc-jm7q5    0 (0%)        0 (0%)      0 (0%)           0 (0%)         5d9h
  elasticsearch              elasticsearch-master-0     100m (5%)     1 (50%)     2G (61%)         2G (61%)       5d
  kube-system                cilium-hprd2               300m (15%)    0 (0%)      300Mi (9%)       0 (0%)         5d9h
  kube-system                csi-do-node-ksb9h          0 (0%)        0 (0%)      70Mi (2%)        0 (0%)         5d9h
  kube-system                do-node-agent-scp87        102m (5%)     102m (5%)   80Mi (2%)        100Mi (3%)     5d9h
  kube-system                kube-proxy-7ljlp           0 (0%)        0 (0%)      125Mi (4%)       0 (0%)         5d9h
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource                                          Requests          Limits
  --------                                          --------          ------
  cpu                                               502m (25%)        1102m (55%)
  memory                                            2602931200 (79%)  2104857600 (64%)
  ephemeral-storage                                 0 (0%)            0 (0%)
  attachable-volumes-csi-dobs.csi.digitalocean.com  0                 0
Events:                                             <none>

Name:               elasticsearch-eqmy
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/instance-type=s-2vcpu-4gb
                    beta.kubernetes.io/os=linux
                    doks.digitalocean.com/node-id=037ade00-11a1-4ac0-b7d2-7a355cb097b1
                    doks.digitalocean.com/node-pool=elasticsearch
                    doks.digitalocean.com/node-pool-id=111efabc-7649-40eb-b06d-997f047fc6c7
                    doks.digitalocean.com/version=1.15.5-do.0
                    failure-domain.beta.kubernetes.io/region=fra1
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=elasticsearch-eqmy
                    kubernetes.io/os=linux
                    region=fra1
Annotations:        csi.volume.kubernetes.io/nodeid: {"dobs.csi.digitalocean.com":"165833087"}
                    io.cilium.network.ipv4-cilium-host: 10.244.3.1
                    io.cilium.network.ipv4-health-ip: 10.244.3.34
                    io.cilium.network.ipv4-pod-cidr: 10.244.3.0/24
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 06 Nov 2019 16:44:01 +0200
Taints:             <none>
Unschedulable:      false
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Wed, 06 Nov 2019 16:44:11 +0200   Wed, 06 Nov 2019 16:44:11 +0200   CiliumIsUp                   Cilium is running on this node
  MemoryPressure       False   Tue, 12 Nov 2019 01:45:27 +0200   Wed, 06 Nov 2019 16:44:01 +0200   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Tue, 12 Nov 2019 01:45:27 +0200   Wed, 06 Nov 2019 16:44:01 +0200   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Tue, 12 Nov 2019 01:45:27 +0200   Wed, 06 Nov 2019 16:44:01 +0200   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Tue, 12 Nov 2019 01:45:27 +0200   Wed, 06 Nov 2019 16:44:21 +0200   KubeletReady                 kubelet is posting ready status
Addresses:
  Hostname:    elasticsearch-eqmy
  InternalIP:  10.135.120.68
  ExternalIP:  178.128.195.191
Capacity:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            4041632Ki
 pods:                                              110
Allocatable:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            3110Mi
 pods:                                              110
System Info:
 Machine ID:                 cb2ba44c6ee748dfa37182ee15718eb3
 System UUID:                cb2ba44c-6ee7-48df-a371-82ee15718eb3
 Boot ID:                    f84b7aef-e84f-47d1-bd8b-c6b61b2280e5
 Kernel Version:             4.19.0-0.bpo.6-amd64
 OS Image:                   Debian GNU/Linux 9 (stretch)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://18.9.2
 Kubelet Version:            v1.15.5
 Kube-Proxy Version:         v1.15.5
PodCIDR:                     10.244.3.0/24
ProviderID:                  digitalocean://165833087
Non-terminated Pods:         (6 in total)
  Namespace                  Name                       CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                       ------------  ----------  ---------------  -------------  ---
  default                    spotify-docker-gc-hzjgx    0 (0%)        0 (0%)      0 (0%)           0 (0%)         5d9h
  elasticsearch              elasticsearch-master-1     100m (5%)     1 (50%)     2G (61%)         2G (61%)       5d8h
  kube-system                cilium-98f7v               300m (15%)    0 (0%)      300Mi (9%)       0 (0%)         5d9h
  kube-system                csi-do-node-tzg4r          0 (0%)        0 (0%)      70Mi (2%)        0 (0%)         5d9h
  kube-system                do-node-agent-nr8r4        102m (5%)     102m (5%)   80Mi (2%)        100Mi (3%)     5d9h
  kube-system                kube-proxy-jx75n           0 (0%)        0 (0%)      125Mi (4%)       0 (0%)         5d9h
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource                                          Requests          Limits
  --------                                          --------          ------
  cpu                                               502m (25%)        1102m (55%)
  memory                                            2602931200 (79%)  2104857600 (64%)
  ephemeral-storage                                 0 (0%)            0 (0%)
  attachable-volumes-csi-dobs.csi.digitalocean.com  0                 0
Events:                                             <none>

Name:               prebid-server-pool-g5v0
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/instance-type=s-2vcpu-4gb
                    beta.kubernetes.io/os=linux
                    doks.digitalocean.com/node-id=c35e39fa-d51d-4b1a-9485-4a3184afec5a
                    doks.digitalocean.com/node-pool=prebid-server-pool
                    doks.digitalocean.com/node-pool-id=03b7eda4-1206-402b-b0f2-75d672976afb
                    doks.digitalocean.com/version=1.15.5-do.0
                    failure-domain.beta.kubernetes.io/region=fra1
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=prebid-server-pool-g5v0
                    kubernetes.io/os=linux
                    region=fra1
Annotations:        csi.volume.kubernetes.io/nodeid: {"dobs.csi.digitalocean.com":"164929545"}
                    io.cilium.network.ipv4-cilium-host: 10.244.0.1
                    io.cilium.network.ipv4-health-ip: 10.244.0.188
                    io.cilium.network.ipv4-pod-cidr: 10.244.0.0/24
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 30 Oct 2019 11:54:28 +0200
Taints:             <none>
Unschedulable:      false
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Wed, 30 Oct 2019 11:54:36 +0200   Wed, 30 Oct 2019 11:54:36 +0200   CiliumIsUp                   Cilium is running on this node
  MemoryPressure       False   Tue, 12 Nov 2019 01:44:59 +0200   Wed, 30 Oct 2019 11:54:28 +0200   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Tue, 12 Nov 2019 01:44:59 +0200   Wed, 30 Oct 2019 11:54:28 +0200   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Tue, 12 Nov 2019 01:44:59 +0200   Wed, 30 Oct 2019 11:54:28 +0200   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Tue, 12 Nov 2019 01:44:59 +0200   Wed, 30 Oct 2019 11:54:38 +0200   KubeletReady                 kubelet is posting ready status
Addresses:
  Hostname:    prebid-server-pool-g5v0
  InternalIP:  10.135.69.84
  ExternalIP:  165.22.67.137
Capacity:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            4041632Ki
 pods:                                              110
Allocatable:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            3110Mi
 pods:                                              110
System Info:
 Machine ID:                 b197673de1cb46209dc878820d766f1d
 System UUID:                b197673d-e1cb-4620-9dc8-78820d766f1d
 Boot ID:                    a7e411e1-c042-4186-8b07-c1cf86528913
 Kernel Version:             4.19.0-0.bpo.6-amd64
 OS Image:                   Debian GNU/Linux 9 (stretch)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://18.9.2
 Kubelet Version:            v1.15.5
 Kube-Proxy Version:         v1.15.5
PodCIDR:                     10.244.0.0/24
ProviderID:                  digitalocean://164929545
Non-terminated Pods:         (10 in total)
  Namespace                  Name                                     CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                                     ------------  ----------  ---------------  -------------  ---
  default                    prebid-server-7c44c5df99-lzv6g           0 (0%)        0 (0%)      0 (0%)           0 (0%)         12d
  default                    spotify-docker-gc-g2hh2                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         7d1h
  kube-system                cilium-operator-78d9dd6447-68rp2         0 (0%)        0 (0%)      0 (0%)           0 (0%)         12d
  kube-system                cilium-xz2sf                             300m (15%)    0 (0%)      300Mi (9%)       0 (0%)         12d
  kube-system                coredns-cf4ff8c4-cfzpz                   100m (5%)     0 (0%)      70Mi (2%)        170Mi (5%)     12d
  kube-system                coredns-cf4ff8c4-g6dcj                   100m (5%)     0 (0%)      70Mi (2%)        170Mi (5%)     12d
  kube-system                csi-do-node-jfw5x                        0 (0%)        0 (0%)      70Mi (2%)        0 (0%)         12d
  kube-system                do-node-agent-4vhrz                      102m (5%)     102m (5%)   80Mi (2%)        100Mi (3%)     12d
  kube-system                kube-proxy-dfw7k                         0 (0%)        0 (0%)      125Mi (4%)       0 (0%)         12d
  kube-system                kubelet-rubber-stamp-76bd547767-b4pch    0 (0%)        0 (0%)      0 (0%)           0 (0%)         12d
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource                                          Requests     Limits
  --------                                          --------     ------
  cpu                                               602m (30%)   102m (5%)
  memory                                            715Mi (22%)  440Mi (14%)
  ephemeral-storage                                 0 (0%)       0 (0%)
  attachable-volumes-csi-dobs.csi.digitalocean.com  0            0
Events:                                             <none>

Name:               prebid-server-pool-g5v1
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/instance-type=s-2vcpu-4gb
                    beta.kubernetes.io/os=linux
                    doks.digitalocean.com/node-id=7ba37b1a-2c05-4b81-af63-a5ea045b2d00
                    doks.digitalocean.com/node-pool=prebid-server-pool
                    doks.digitalocean.com/node-pool-id=03b7eda4-1206-402b-b0f2-75d672976afb
                    doks.digitalocean.com/version=1.15.5-do.0
                    failure-domain.beta.kubernetes.io/region=fra1
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=prebid-server-pool-g5v1
                    kubernetes.io/os=linux
                    region=fra1
Annotations:        csi.volume.kubernetes.io/nodeid: {"dobs.csi.digitalocean.com":"164929544"}
                    io.cilium.network.ipv4-cilium-host: 10.244.1.1
                    io.cilium.network.ipv4-health-ip: 10.244.1.85
                    io.cilium.network.ipv4-pod-cidr: 10.244.1.0/24
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 30 Oct 2019 11:54:38 +0200
Taints:             <none>
Unschedulable:      false
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Wed, 30 Oct 2019 11:54:45 +0200   Wed, 30 Oct 2019 11:54:45 +0200   CiliumIsUp                   Cilium is running on this node
  MemoryPressure       False   Tue, 12 Nov 2019 01:45:32 +0200   Wed, 30 Oct 2019 11:54:37 +0200   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Tue, 12 Nov 2019 01:45:32 +0200   Wed, 30 Oct 2019 11:54:37 +0200   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Tue, 12 Nov 2019 01:45:32 +0200   Wed, 30 Oct 2019 11:54:37 +0200   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Tue, 12 Nov 2019 01:45:32 +0200   Wed, 30 Oct 2019 11:54:48 +0200   KubeletReady                 kubelet is posting ready status
Addresses:
  Hostname:    prebid-server-pool-g5v1
  InternalIP:  10.135.70.114
  ExternalIP:  167.71.62.15
Capacity:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            4041632Ki
 pods:                                              110
Allocatable:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            3110Mi
 pods:                                              110
System Info:
 Machine ID:                 69d6a5947b654ca4b06e6a0605d44019
 System UUID:                69d6a594-7b65-4ca4-b06e-6a0605d44019
 Boot ID:                    9da784b7-0887-4daa-b7a4-af79ac67627d
 Kernel Version:             4.19.0-0.bpo.6-amd64
 OS Image:                   Debian GNU/Linux 9 (stretch)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://18.9.2
 Kubelet Version:            v1.15.5
 Kube-Proxy Version:         v1.15.5
PodCIDR:                     10.244.1.0/24
ProviderID:                  digitalocean://164929544
Non-terminated Pods:         (9 in total)
  Namespace                  Name                                              CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                                              ------------  ----------  ---------------  -------------  ---
  default                    nginx-ingress-default-backend-576b86996d-pkp55    0 (0%)        0 (0%)      0 (0%)           0 (0%)         12d
  default                    prebid-server-7c44c5df99-bkjmw                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         12d
  default                    spotify-docker-gc-hhm24                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         7d1h
  elasticsearch              kibana-kibana-7b9c744857-j8cg6                    100m (5%)     1 (50%)     500Mi (16%)      1Gi (32%)      5d7h
  kube-system                cilium-pfdd4                                      300m (15%)    0 (0%)      300Mi (9%)       0 (0%)         12d
  kube-system                csi-do-node-lkc5m                                 0 (0%)        0 (0%)      70Mi (2%)        0 (0%)         12d
  kube-system                do-node-agent-dss84                               102m (5%)     102m (5%)   80Mi (2%)        100Mi (3%)     12d
  kube-system                kube-proxy-dtpfk                                  0 (0%)        0 (0%)      125Mi (4%)       0 (0%)         12d
  kube-system                tiller-deploy-7bf78cdbf7-nvq4g                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         12d
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource                                          Requests      Limits
  --------                                          --------      ------
  cpu                                               502m (25%)    1102m (55%)
  memory                                            1075Mi (34%)  1124Mi (36%)
  ephemeral-storage                                 0 (0%)        0 (0%)
  attachable-volumes-csi-dobs.csi.digitalocean.com  0             0
Events:                                             <none>

Name:               prebid-server-pool-g5vd
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/instance-type=s-2vcpu-4gb
                    beta.kubernetes.io/os=linux
                    doks.digitalocean.com/node-id=63d961b9-e585-49a6-a5f2-26485826c5dd
                    doks.digitalocean.com/node-pool=prebid-server-pool
                    doks.digitalocean.com/node-pool-id=03b7eda4-1206-402b-b0f2-75d672976afb
                    doks.digitalocean.com/version=1.15.5-do.0
                    failure-domain.beta.kubernetes.io/region=fra1
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=prebid-server-pool-g5vd
                    kubernetes.io/os=linux
                    region=fra1
Annotations:        csi.volume.kubernetes.io/nodeid: {"dobs.csi.digitalocean.com":"164929546"}
                    io.cilium.network.ipv4-cilium-host: 10.244.2.1
                    io.cilium.network.ipv4-health-ip: 10.244.2.178
                    io.cilium.network.ipv4-pod-cidr: 10.244.2.0/24
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 30 Oct 2019 11:54:49 +0200
Taints:             <none>
Unschedulable:      false
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Wed, 30 Oct 2019 11:55:02 +0200   Wed, 30 Oct 2019 11:55:02 +0200   CiliumIsUp                   Cilium is running on this node
  MemoryPressure       False   Tue, 12 Nov 2019 01:45:01 +0200   Wed, 30 Oct 2019 11:54:49 +0200   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Tue, 12 Nov 2019 01:45:01 +0200   Wed, 30 Oct 2019 11:54:49 +0200   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Tue, 12 Nov 2019 01:45:01 +0200   Wed, 30 Oct 2019 11:54:49 +0200   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Tue, 12 Nov 2019 01:45:01 +0200   Wed, 30 Oct 2019 11:55:09 +0200   KubeletReady                 kubelet is posting ready status
Addresses:
  Hostname:    prebid-server-pool-g5vd
  InternalIP:  10.135.97.136
  ExternalIP:  167.172.160.17
Capacity:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            4041632Ki
 pods:                                              110
Allocatable:
 attachable-volumes-csi-dobs.csi.digitalocean.com:  7
 cpu:                                               2
 ephemeral-storage:                                 82535812Ki
 hugepages-1Gi:                                     0
 hugepages-2Mi:                                     0
 memory:                                            3110Mi
 pods:                                              110
System Info:
 Machine ID:                 2761bbcb9ec848c3aab4f389a095f7ce
 System UUID:                2761bbcb-9ec8-48c3-aab4-f389a095f7ce
 Boot ID:                    ceead340-efb7-47ef-bbb2-d7def6d07e9d
 Kernel Version:             4.19.0-0.bpo.6-amd64
 OS Image:                   Debian GNU/Linux 9 (stretch)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://18.9.2
 Kubelet Version:            v1.15.5
 Kube-Proxy Version:         v1.15.5
PodCIDR:                     10.244.2.0/24
ProviderID:                  digitalocean://164929546
Non-terminated Pods:         (9 in total)
  Namespace                  Name                                               CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                                               ------------  ----------  ---------------  -------------  ---
  default                    nginx-ingress-controller-57668b5c4c-95cft          0 (0%)        0 (0%)      0 (0%)           0 (0%)         12d
  default                    prebid-server-7c44c5df99-hjqqd                     0 (0%)        0 (0%)      0 (0%)           0 (0%)         12d
  default                    spotify-docker-gc-gtw99                            0 (0%)        0 (0%)      0 (0%)           0 (0%)         7d1h
  kube-system                cilium-kqljb                                       300m (15%)    0 (0%)      300Mi (9%)       0 (0%)         12d
  kube-system                csi-do-node-5kdkq                                  0 (0%)        0 (0%)      70Mi (2%)        0 (0%)         12d
  kube-system                dashboard-kubernetes-dashboard-69f4679d8c-t8qcv    100m (5%)     100m (5%)   100Mi (3%)       100Mi (3%)     7d1h
  kube-system                do-node-agent-4hxs7                                102m (5%)     102m (5%)   80Mi (2%)        100Mi (3%)     12d
  kube-system                kube-proxy-vzq66                                   0 (0%)        0 (0%)      125Mi (4%)       0 (0%)         12d
  kube-system                kube-state-metrics-7678f4fc5c-6mrcg                0 (0%)        0 (0%)      0 (0%)           0 (0%)         12d
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource                                          Requests     Limits
  --------                                          --------     ------
  cpu                                               502m (25%)   202m (10%)
  memory                                            675Mi (21%)  200Mi (6%)
  ephemeral-storage                                 0 (0%)       0 (0%)
  attachable-volumes-csi-dobs.csi.digitalocean.com  0            0
Events:                                             <none>
fatmcgav commented 5 years ago

@holms Thank you for the node info. Nothing jumps out as being a potential issue.

Are you able to attach a kubectl describe and kubectl logs for one of the elasticsearch-master pods? I'm curious to see how long it actually took to get to a healthy state...

For reference, I've just tried to deploy a new ES cluster on a GKE cluster with a node pool consisting of 3x 1vCPU/4GB nodes, and the cluster came up at the first time of asking with no pod restarts...

The timeline for one of my test pods looked like this (pod event-timeline screenshot omitted):

During this time, the readinessProbe did fail twice, so it's possible that the default config is a bit "aggressive" for lower-resourced deployments.
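For context on the timing: with the chart defaults (initialDelaySeconds: 10, periodSeconds: 10, successThreshold: 3, failureThreshold: 3, as shown in the COMPUTED VALUES later in this thread), probing starts 10 seconds after the container starts and a pod needs three consecutive successful wait_for_status=green checks before it is marked Ready. The probe can be relaxed through the chart's values; the keys below are the ones visible in the computed values, and the numbers are illustrative only, not recommendations:

readinessProbe:
  initialDelaySeconds: 60   # default 10; gives the JVM and master discovery more startup headroom
  periodSeconds: 10
  failureThreshold: 6       # default 3; tolerates a slower first green health check
# optionally accept yellow instead of green during the startup check
clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s"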

holms commented 5 years ago

Sadly my logs are now polluted with lots of exceptions due to another issue. I've been fine-tuning the readiness probe timeout and ended up setting it to 200 :) 2vCPU / 4GB RAM :) I'll come back to you, because I'll have to redeploy the cluster anyway; now some metadata fails to write into object storage somehow :/

fatmcgav commented 4 years ago

@holms I've been discussing this issue with one of my colleagues today, and he advised me that a failing readinessProbe will not result in the pod being restarted by K8s; restarts are only triggered by a failing livenessProbe or by the container process exiting, and this chart defines no livenessProbe. So my apologies if any of the above gave that impression.

Sometimes, applications are temporarily unable to serve traffic. For example, an application might need to load large data or configuration files during startup, or depend on external services after startup. In such cases, you don’t want to kill the application, but you don’t want to send it requests either. Kubernetes provides readiness probes to detect and mitigate these situations. A pod with containers reporting that they are not ready does not receive traffic through Kubernetes Services.

Source for this is https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes

In order to debug this further, we ideally need the following logs captured during a deployment or upgrade activity:
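For example, something along these lines (a sketch, assuming the release is named elasticsearch and is deployed into the logging namespace, as in the output below):

# release manifest plus user-supplied and computed values
helm get elasticsearch
# per-pod probe config, state and events
kubectl describe pod elasticsearch-master-0 --namespace logging
# namespace-wide event stream around the rollout
kubectl get events --namespace logging --sort-by=.lastTimestamp
# Elasticsearch server logs for each master pod in turn
kubectl logs elasticsearch-master-0 --namespace logging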

holms commented 4 years ago

Using DOKS with a droplet size of s-2vcpu-4gb, with its own namespace and its own node pool of 3 nodes.

helm get elasticsearch

REVISION: 1
RELEASED: Thu Nov 21 15:10:47 2019
CHART: elasticsearch-7.4.1
USER-SUPPLIED VALUES:
esJavaOpts: -Xmx1g -Xms1g
extraInitContainers: |
  - name: create
    image: busybox:1.28
    command: ['mkdir', '-p', '/usr/share/elasticsearch/data/nodes/']
    securityContext:
      runAsUser: 0
    volumeMounts:
     - mountPath: /usr/share/elasticsearch/data
       name: elasticsearch-master
  - name: file-permissions
    image: busybox:1.28
    command: ['chown', '-R', '1000:1000', '/usr/share/elasticsearch/']
    securityContext:
       runAsUser: 0
    volumeMounts:
     - mountPath: /usr/share/elasticsearch/data
       name: elasticsearch-master
nodeSelector:
  doks.digitalocean.com/node-pool: elasticsearch
resources:
  limits:
    cpu: 1000m
    memory: 2G
  requests:
    cpu: 100m
    memory: 2G
volumeClaimTemplate:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20G
  storageClassName: do-block-storage

COMPUTED VALUES:
antiAffinity: hard
antiAffinityTopologyKey: kubernetes.io/hostname
clusterHealthCheckParams: wait_for_status=green&timeout=1s
clusterName: elasticsearch
esConfig: {}
esJavaOpts: -Xmx1g -Xms1g
esMajorVersion: ""
extraEnvs: []
extraInitContainers: |
  - name: create
    image: busybox:1.28
    command: ['mkdir', '-p', '/usr/share/elasticsearch/data/nodes/']
    securityContext:
      runAsUser: 0
    volumeMounts:
     - mountPath: /usr/share/elasticsearch/data
       name: elasticsearch-master
  - name: file-permissions
    image: busybox:1.28
    command: ['chown', '-R', '1000:1000', '/usr/share/elasticsearch/']
    securityContext:
       runAsUser: 0
    volumeMounts:
     - mountPath: /usr/share/elasticsearch/data
       name: elasticsearch-master
extraVolumeMounts: ""
extraVolumes: ""
fsGroup: ""
fullnameOverride: ""
httpPort: 9200
image: docker.elastic.co/elasticsearch/elasticsearch
imagePullPolicy: IfNotPresent
imagePullSecrets: []
imageTag: 7.4.1
ingress:
  annotations: {}
  enabled: false
  hosts:
  - chart-example.local
  path: /
  tls: []
initResources: {}
keystore: []
labels: {}
lifecycle: {}
masterService: ""
masterTerminationFix: false
maxUnavailable: 1
minimumMasterNodes: 2
nameOverride: ""
networkHost: 0.0.0.0
nodeAffinity: {}
nodeGroup: master
nodeSelector:
  doks.digitalocean.com/node-pool: elasticsearch
persistence:
  annotations: {}
  enabled: true
podAnnotations: {}
podManagementPolicy: Parallel
podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000
podSecurityPolicy:
  create: false
  name: ""
  spec:
    fsGroup:
      rule: RunAsAny
    privileged: true
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
    - secret
    - configMap
    - persistentVolumeClaim
priorityClassName: ""
protocol: http
rbac:
  create: false
  serviceAccountName: ""
readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5
replicas: 3
resources:
  limits:
    cpu: 1000m
    memory: 2G
  requests:
    cpu: 100m
    memory: 2G
roles:
  data: "true"
  ingest: "true"
  master: "true"
schedulerName: ""
secretMounts: []
securityContext:
  capabilities:
    drop:
    - ALL
  runAsNonRoot: true
  runAsUser: 1000
service:
  annotations: {}
  httpPortName: http
  nodePort: ""
  transportPortName: transport
  type: ClusterIP
sidecarResources: {}
sysctlInitContainer:
  enabled: true
sysctlVmMaxMapCount: 262144
terminationGracePeriod: 120
tolerations: []
transportPort: 9300
updateStrategy: RollingUpdate
volumeClaimTemplate:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 20G
  storageClassName: do-block-storage

HOOKS:
---
# elasticsearch-sxuwh-test
apiVersion: v1
kind: Pod
metadata:
  name: "elasticsearch-sxuwh-test"
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
  - name: "elasticsearch-elexk-test"
    image: "docker.elastic.co/elasticsearch/elasticsearch:7.4.1"
    command:
      - "sh"
      - "-c"
      - |
        #!/usr/bin/env bash -e
        curl -XGET --fail 'elasticsearch-master:9200/_cluster/health?wait_for_status=green&timeout=1s'
  restartPolicy: Never
MANIFEST:

---
# Source: elasticsearch/templates/poddisruptionbudget.yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: "elasticsearch-master-pdb"
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app: "elasticsearch-master"
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch-master
  labels:
    heritage: "Tiller"
    release: "elasticsearch"
    chart: "elasticsearch"
    app: "elasticsearch-master"
  annotations:
    {}

spec:
  type: ClusterIP
  selector:
    heritage: "Tiller"
    release: "elasticsearch"
    chart: "elasticsearch"
    app: "elasticsearch-master"
  ports:
  - name: http
    protocol: TCP
    port: 9200
  - name: transport
    protocol: TCP
    port: 9300
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch-master-headless
  labels:
    heritage: "Tiller"
    release: "elasticsearch"
    chart: "elasticsearch"
    app: "elasticsearch-master"
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve
  # Create endpoints also if the related pod isn't ready
  publishNotReadyAddresses: true
  selector:
    app: "elasticsearch-master"
  ports:
  - name: http
    port: 9200
  - name: transport
    port: 9300
---
# Source: elasticsearch/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-master
  labels:
    heritage: "Tiller"
    release: "elasticsearch"
    chart: "elasticsearch"
    app: "elasticsearch-master"
  annotations:
    esMajorVersion: "7"
spec:
  serviceName: elasticsearch-master-headless
  selector:
    matchLabels:
      app: "elasticsearch-master"
  replicas: 3
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
  volumeClaimTemplates:
  - metadata:
      name: elasticsearch-master
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 20G
      storageClassName: do-block-storage

  template:
    metadata:
      name: "elasticsearch-master"
      labels:
        heritage: "Tiller"
        release: "elasticsearch"
        chart: "elasticsearch"
        app: "elasticsearch-master"
      annotations:

    spec:
      securityContext:
        fsGroup: 1000
        runAsUser: 1000

      nodeSelector:
        doks.digitalocean.com/node-pool: elasticsearch

      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - "elasticsearch-master"
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 120
      volumes:
      initContainers:
      - name: configure-sysctl
        securityContext:
          runAsUser: 0
          privileged: true
        image: "docker.elastic.co/elasticsearch/elasticsearch:7.4.1"
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        resources:
          {}

      - name: create
        image: busybox:1.28
        command: ['mkdir', '-p', '/usr/share/elasticsearch/data/nodes/']
        securityContext:
          runAsUser: 0
        volumeMounts:
         - mountPath: /usr/share/elasticsearch/data
           name: elasticsearch-master
      - name: file-permissions
        image: busybox:1.28
        command: ['chown', '-R', '1000:1000', '/usr/share/elasticsearch/']
        securityContext:
           runAsUser: 0
        volumeMounts:
         - mountPath: /usr/share/elasticsearch/data
           name: elasticsearch-master

      containers:
      - name: "elasticsearch"
        securityContext:
          capabilities:
            drop:
            - ALL
          runAsNonRoot: true
          runAsUser: 1000

        image: "docker.elastic.co/elasticsearch/elasticsearch:7.4.1"
        imagePullPolicy: "IfNotPresent"
        readinessProbe:
          failureThreshold: 3
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 3
          timeoutSeconds: 5

          exec:
            command:
              - sh
              - -c
              - |
                #!/usr/bin/env bash -e
                # If the node is starting up wait for the cluster to be ready (request params: 'wait_for_status=green&timeout=1s' )
                # Once it has started only check that the node itself is responding
                START_FILE=/tmp/.es_start_file

                http () {
                    local path="${1}"
                    if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then
                      BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}"
                    else
                      BASIC_AUTH=''
                    fi
                    curl -XGET -s -k --fail ${BASIC_AUTH} http://127.0.0.1:9200${path}
                }

                if [ -f "${START_FILE}" ]; then
                    echo 'Elasticsearch is already running, lets check the node is healthy'
                    http "/"
                else
                    echo 'Waiting for elasticsearch cluster to become cluster to be ready (request params: "wait_for_status=green&timeout=1s" )'
                    if http "/_cluster/health?wait_for_status=green&timeout=1s" ; then
                        touch ${START_FILE}
                        exit 0
                    else
                        echo 'Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )'
                        exit 1
                    fi
                fi
        ports:
        - name: http
          containerPort: 9200
        - name: transport
          containerPort: 9300
        resources:
          limits:
            cpu: 1000m
            memory: 2G
          requests:
            cpu: 100m
            memory: 2G

        env:
          - name: node.name
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: cluster.initial_master_nodes
            value: "elasticsearch-master-0,elasticsearch-master-1,elasticsearch-master-2,"
          - name: discovery.seed_hosts
            value: "elasticsearch-master-headless"
          - name: cluster.name
            value: "elasticsearch"
          - name: network.host
            value: "0.0.0.0"
          - name: ES_JAVA_OPTS
            value: "-Xmx1g -Xms1g"
          - name: node.data
            value: "true"
          - name: node.ingest
            value: "true"
          - name: node.master
            value: "true"
        volumeMounts:
          - name: "elasticsearch-master"
            mountPath: /usr/share/elasticsearch/data

Here's kubectl describe:

Name:           elasticsearch-master-0
Namespace:      logging
Priority:       0
Node:           elasticsearch-eqmy/10.135.120.68
Start Time:     Thu, 21 Nov 2019 15:10:52 +0200
Labels:         app=elasticsearch-master
                chart=elasticsearch
                controller-revision-hash=elasticsearch-master-75b5858989
                heritage=Tiller
                release=elasticsearch
                statefulset.kubernetes.io/pod-name=elasticsearch-master-0
Annotations:    <none>
Status:         Running
IP:             10.244.3.26
IPs:            <none>
Controlled By:  StatefulSet/elasticsearch-master
Init Containers:
  configure-sysctl:
    Container ID:  docker://866bd8660cc29c54f3c24e5fca1d2bf9a761431a40af1c5e71343cae57b2a66e
    Image:         docker.elastic.co/elasticsearch/elasticsearch:7.4.1
    Image ID:      docker-pullable://docker.elastic.co/elasticsearch/elasticsearch@sha256:88c2ee30115f378b8f7e66662ec26bca0c8778c69096bee6b161128ce833585f
    Port:          <none>
    Host Port:     <none>
    Command:
      sysctl
      -w
      vm.max_map_count=262144
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Thu, 21 Nov 2019 15:11:02 +0200
      Finished:     Thu, 21 Nov 2019 15:11:02 +0200
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-tkg5b (ro)
  create:
    Container ID:  docker://06580d0f93e4a96e3f1cd0b22d43fb93b314bccc75c52a61c6265c7c6fcd4e97
    Image:         busybox:1.28
    Image ID:      docker-pullable://busybox@sha256:141c253bc4c3fd0a201d32dc1f493bcf3fff003b6df416dea4f41046e0f37d47
    Port:          <none>
    Host Port:     <none>
    Command:
      mkdir
      -p
      /usr/share/elasticsearch/data/nodes/
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Thu, 21 Nov 2019 15:11:03 +0200
      Finished:     Thu, 21 Nov 2019 15:11:03 +0200
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /usr/share/elasticsearch/data from elasticsearch-master (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-tkg5b (ro)
  file-permissions:
    Container ID:  docker://45c5472f995bbb975773767a6f91368e213a756e2fbd6023b5b1522b15372cb0
    Image:         busybox:1.28
    Image ID:      docker-pullable://busybox@sha256:141c253bc4c3fd0a201d32dc1f493bcf3fff003b6df416dea4f41046e0f37d47
    Port:          <none>
    Host Port:     <none>
    Command:
      chown
      -R
      1000:1000
      /usr/share/elasticsearch/
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Thu, 21 Nov 2019 15:11:04 +0200
      Finished:     Thu, 21 Nov 2019 15:11:04 +0200
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /usr/share/elasticsearch/data from elasticsearch-master (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-tkg5b (ro)
Containers:
  elasticsearch:
    Container ID:   docker://e5d62ae6a4f8e886745da430ddb7f9c9e0fa6673400ba86af4a3cd377edf88d2
    Image:          docker.elastic.co/elasticsearch/elasticsearch:7.4.1
    Image ID:       docker-pullable://docker.elastic.co/elasticsearch/elasticsearch@sha256:88c2ee30115f378b8f7e66662ec26bca0c8778c69096bee6b161128ce833585f
    Ports:          9200/TCP, 9300/TCP
    Host Ports:     0/TCP, 0/TCP
    State:          Running
      Started:      Thu, 21 Nov 2019 15:11:05 +0200
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     1
      memory:  2G
    Requests:
      cpu:      100m
      memory:   2G
    Readiness:  exec [sh -c #!/usr/bin/env bash -e
# If the node is starting up wait for the cluster to be ready (request params: 'wait_for_status=green&timeout=1s' )
# Once it has started only check that the node itself is responding
START_FILE=/tmp/.es_start_file

http () {
    local path="${1}"
    if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then
      BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}"
    else
      BASIC_AUTH=''
    fi
    curl -XGET -s -k --fail ${BASIC_AUTH} http://127.0.0.1:9200${path}
}

if [ -f "${START_FILE}" ]; then
    echo 'Elasticsearch is already running, lets check the node is healthy'
    http "/"
else
    echo 'Waiting for elasticsearch cluster to become cluster to be ready (request params: "wait_for_status=green&timeout=1s" )'
    if http "/_cluster/health?wait_for_status=green&timeout=1s" ; then
        touch ${START_FILE}
        exit 0
    else
        echo 'Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )'
        exit 1
    fi
fi
] delay=10s timeout=5s period=10s #success=3 #failure=3
    Environment:
      node.name:                     elasticsearch-master-0 (v1:metadata.name)
      cluster.initial_master_nodes:  elasticsearch-master-0,elasticsearch-master-1,elasticsearch-master-2,
      discovery.seed_hosts:          elasticsearch-master-headless
      cluster.name:                  elasticsearch
      network.host:                  0.0.0.0
      ES_JAVA_OPTS:                  -Xmx1g -Xms1g
      node.data:                     true
      node.ingest:                   true
      node.master:                   true
    Mounts:
      /usr/share/elasticsearch/data from elasticsearch-master (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-tkg5b (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  elasticsearch-master:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  elasticsearch-master-elasticsearch-master-0
    ReadOnly:   false
  default-token-tkg5b:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-tkg5b
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  doks.digitalocean.com/node-pool=elasticsearch
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason                  Age                    From                         Message
  ----     ------                  ----                   ----                         -------
  Warning  FailedScheduling        3m52s (x3 over 3m53s)  default-scheduler            pod has unbound immediate PersistentVolumeClaims (repeated 3 times)
  Normal   Scheduled               3m49s                  default-scheduler            Successfully assigned logging/elasticsearch-master-0 to elasticsearch-eqmy
  Normal   SuccessfulAttachVolume  3m42s                  attachdetach-controller      AttachVolume.Attach succeeded for volume "pvc-43d3e4a1-0b71-4dff-923e-121d85459fe7"
  Normal   Pulled                  3m39s                  kubelet, elasticsearch-eqmy  Container image "docker.elastic.co/elasticsearch/elasticsearch:7.4.1" already present on machine
  Normal   Created                 3m39s                  kubelet, elasticsearch-eqmy  Created container configure-sysctl
  Normal   Started                 3m39s                  kubelet, elasticsearch-eqmy  Started container configure-sysctl
  Normal   Pulled                  3m38s                  kubelet, elasticsearch-eqmy  Container image "busybox:1.28" already present on machine
  Normal   Created                 3m38s                  kubelet, elasticsearch-eqmy  Created container create
  Normal   Started                 3m38s                  kubelet, elasticsearch-eqmy  Started container create
  Normal   Pulled                  3m37s                  kubelet, elasticsearch-eqmy  Container image "busybox:1.28" already present on machine
  Normal   Created                 3m37s                  kubelet, elasticsearch-eqmy  Created container file-permissions
  Normal   Started                 3m37s                  kubelet, elasticsearch-eqmy  Started container file-permissions
  Normal   Pulled                  3m36s                  kubelet, elasticsearch-eqmy  Container image "docker.elastic.co/elasticsearch/elasticsearch:7.4.1" already present on machine
  Normal   Created                 3m36s                  kubelet, elasticsearch-eqmy  Created container elasticsearch
  Normal   Started                 3m36s                  kubelet, elasticsearch-eqmy  Started container elasticsearch
  Warning  Unhealthy               3m3s (x3 over 3m23s)   kubelet, elasticsearch-eqmy  Readiness probe failed: Waiting for elasticsearch cluster to become cluster to be ready (request params: "wait_for_status=green&timeout=1s" )
Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )

And kubectl get events:

LAST SEEN   TYPE      REASON                   OBJECT                       MESSAGE
4m19s       Warning   FailedScheduling         pod/elasticsearch-master-0   pod has unbound immediate PersistentVolumeClaims (repeated 3 times)
4m16s       Normal    Scheduled                pod/elasticsearch-master-0   Successfully assigned logging/elasticsearch-master-0 to elasticsearch-eqmy
4m9s        Normal    SuccessfulAttachVolume   pod/elasticsearch-master-0   AttachVolume.Attach succeeded for volume "pvc-43d3e4a1-0b71-4dff-923e-121d85459fe7"
4m6s        Normal    Pulled                   pod/elasticsearch-master-0   Container image "docker.elastic.co/elasticsearch/elasticsearch:7.4.1" already present on machine
4m6s        Normal    Created                  pod/elasticsearch-master-0   Created container configure-sysctl
4m6s        Normal    Started                  pod/elasticsearch-master-0   Started container configure-sysctl
4m5s        Normal    Pulled                   pod/elasticsearch-master-0   Container image "busybox:1.28" already present on machine
4m5s        Normal    Created                  pod/elasticsearch-master-0   Created container create
4m5s        Normal    Started                  pod/elasticsearch-master-0   Started container create
4m4s        Normal    Pulled                   pod/elasticsearch-master-0   Container image "busybox:1.28" already present on machine
4m4s        Normal    Created                  pod/elasticsearch-master-0   Created container file-permissions
4m4s        Normal    Started                  pod/elasticsearch-master-0   Started container file-permissions
4m3s        Normal    Pulled                   pod/elasticsearch-master-0   Container image "docker.elastic.co/elasticsearch/elasticsearch:7.4.1" already present on machine
4m3s        Normal    Created                  pod/elasticsearch-master-0   Created container elasticsearch
4m3s        Normal    Started                  pod/elasticsearch-master-0   Started container elasticsearch
3m30s       Warning   Unhealthy                pod/elasticsearch-master-0   Readiness probe failed: Waiting for elasticsearch cluster to become cluster to be ready (request params: "wait_for_status=green&timeout=1s" )
Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )
4m19s       Warning   FailedScheduling         pod/elasticsearch-master-1   pod has unbound immediate PersistentVolumeClaims (repeated 3 times)
4m16s       Normal    Scheduled                pod/elasticsearch-master-1   Successfully assigned logging/elasticsearch-master-1 to elasticsearch-eqmo
4m9s        Normal    SuccessfulAttachVolume   pod/elasticsearch-master-1   AttachVolume.Attach succeeded for volume "pvc-e9273b4b-5a24-4935-a556-d82252969323"
3m57s       Normal    Pulled                   pod/elasticsearch-master-1   Container image "docker.elastic.co/elasticsearch/elasticsearch:7.4.1" already present on machine
3m57s       Normal    Created                  pod/elasticsearch-master-1   Created container configure-sysctl
3m57s       Normal    Started                  pod/elasticsearch-master-1   Started container configure-sysctl
3m56s       Normal    Pulled                   pod/elasticsearch-master-1   Container image "busybox:1.28" already present on machine
3m56s       Normal    Created                  pod/elasticsearch-master-1   Created container create
3m55s       Normal    Started                  pod/elasticsearch-master-1   Started container create
3m55s       Normal    Pulled                   pod/elasticsearch-master-1   Container image "busybox:1.28" already present on machine
3m55s       Normal    Created                  pod/elasticsearch-master-1   Created container file-permissions
3m54s       Normal    Started                  pod/elasticsearch-master-1   Started container file-permissions
3m54s       Normal    Pulled                   pod/elasticsearch-master-1   Container image "docker.elastic.co/elasticsearch/elasticsearch:7.4.1" already present on machine
3m54s       Normal    Created                  pod/elasticsearch-master-1   Created container elasticsearch
3m53s       Normal    Started                  pod/elasticsearch-master-1   Started container elasticsearch
3m18s       Warning   Unhealthy                pod/elasticsearch-master-1   Readiness probe failed: Waiting for elasticsearch cluster to become cluster to be ready (request params: "wait_for_status=green&timeout=1s" )
Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )
4m18s       Warning   FailedScheduling         pod/elasticsearch-master-2   pod has unbound immediate PersistentVolumeClaims (repeated 3 times)
4m16s       Normal    Scheduled                pod/elasticsearch-master-2   Successfully assigned logging/elasticsearch-master-2 to elasticsearch-eqmx
4m10s       Normal    SuccessfulAttachVolume   pod/elasticsearch-master-2   AttachVolume.Attach succeeded for volume "pvc-77aca0f2-e0f4-4380-b15a-fa78a41eee38"
4m5s        Normal    Pulled                   pod/elasticsearch-master-2   Container image "docker.elastic.co/elasticsearch/elasticsearch:7.4.1" already present on machine
4m4s        Normal    Created                  pod/elasticsearch-master-2   Created container configure-sysctl
4m4s        Normal    Started                  pod/elasticsearch-master-2   Started container configure-sysctl
4m4s        Normal    Pulled                   pod/elasticsearch-master-2   Container image "busybox:1.28" already present on machine
4m4s        Normal    Created                  pod/elasticsearch-master-2   Created container create
4m4s        Normal    Started                  pod/elasticsearch-master-2   Started container create
4m3s        Normal    Pulled                   pod/elasticsearch-master-2   Container image "busybox:1.28" already present on machine
4m3s        Normal    Created                  pod/elasticsearch-master-2   Created container file-permissions
4m3s        Normal    Started                  pod/elasticsearch-master-2   Started container file-permissions
4m2s        Normal    Pulled                   pod/elasticsearch-master-2   Container image "docker.elastic.co/elasticsearch/elasticsearch:7.4.1" already present on machine
4m2s        Normal    Created                  pod/elasticsearch-master-2   Created container elasticsearch
4m1s        Normal    Started                  pod/elasticsearch-master-2   Started container elasticsearch
3m32s       Warning   Unhealthy                pod/elasticsearch-master-2   Readiness probe failed: Waiting for elasticsearch cluster to become cluster to be ready (request params: "wait_for_status=green&timeout=1s" )
Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )
4m20s       Normal    ExternalProvisioning     persistentvolumeclaim/elasticsearch-master-elasticsearch-master-0   waiting for a volume to be created, either by external provisioner "dobs.csi.digitalocean.com" or manually created by system administrator
4m20s       Normal    Provisioning             persistentvolumeclaim/elasticsearch-master-elasticsearch-master-0   External provisioner is provisioning volume for claim "logging/elasticsearch-master-elasticsearch-master-0"
4m20s       Normal    ExternalProvisioning     persistentvolumeclaim/elasticsearch-master-elasticsearch-master-1   waiting for a volume to be created, either by external provisioner "dobs.csi.digitalocean.com" or manually created by system administrator
4m20s       Normal    Provisioning             persistentvolumeclaim/elasticsearch-master-elasticsearch-master-1   External provisioner is provisioning volume for claim "logging/elasticsearch-master-elasticsearch-master-1"
4m20s       Normal    ExternalProvisioning     persistentvolumeclaim/elasticsearch-master-elasticsearch-master-2   waiting for a volume to be created, either by external provisioner "dobs.csi.digitalocean.com" or manually created by system administrator
4m20s       Normal    Provisioning             persistentvolumeclaim/elasticsearch-master-elasticsearch-master-2   External provisioner is provisioning volume for claim "logging/elasticsearch-master-elasticsearch-master-2"
4m21s       Normal    NoPods                   poddisruptionbudget/elasticsearch-master-pdb                        No matching pods found
4m20s       Normal    SuccessfulCreate         statefulset/elasticsearch-master                                    create Claim elasticsearch-master-elasticsearch-master-0 Pod elasticsearch-master-0 in StatefulSet elasticsearch-master success
4m20s       Normal    SuccessfulCreate         statefulset/elasticsearch-master                                    create Pod elasticsearch-master-0 in StatefulSet elasticsearch-master successful
4m20s       Normal    SuccessfulCreate         statefulset/elasticsearch-master                                    create Claim elasticsearch-master-elasticsearch-master-1 Pod elasticsearch-master-1 in StatefulSet elasticsearch-master success
4m20s       Normal    SuccessfulCreate         statefulset/elasticsearch-master                                    create Pod elasticsearch-master-1 in StatefulSet elasticsearch-master successful
4m20s       Normal    SuccessfulCreate         statefulset/elasticsearch-master                                    create Claim elasticsearch-master-elasticsearch-master-2 Pod elasticsearch-master-2 in StatefulSet elasticsearch-master success
4m20s       Normal    SuccessfulCreate         statefulset/elasticsearch-master                                    create Pod elasticsearch-master-2 in StatefulSet elasticsearch-master successful

Logs:

 kubectl logs -l elasticsearch --namespace=logging

This returns nothing because the chart labels its pods app=elasticsearch-master (kubectl logs -l app=elasticsearch-master --namespace=logging would match), so showing a specific pod:

> kubectl logs  elasticsearch-master-0 --namespace logging
OpenJDK 64-Bit Server VM warning: Option UseConcMarkSweepGC was deprecated in version 9.0 and will likely be removed in a future release.
{"type": "server", "timestamp": "2019-11-21T13:11:11,697Z", "level": "INFO", "component": "o.e.e.NodeEnvironment", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "using [1] data paths, mounts [[/usr/share/elasticsearch/data (/dev/disk/by-id/scsi-0DO_Volume_pvc-43d3e4a1-0b71-4dff-923e-121d85459fe7)]], net usable_space [16.6gb], net total_space [17.5gb], types [ext4]" }
{"type": "server", "timestamp": "2019-11-21T13:11:11,703Z", "level": "INFO", "component": "o.e.e.NodeEnvironment", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "heap size [1015.6mb], compressed ordinary object pointers [true]" }
{"type": "server", "timestamp": "2019-11-21T13:11:11,708Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "node name [elasticsearch-master-0], node ID [C_nHx1VvTJKaPcAzOWY7AQ], cluster name [elasticsearch]" }
{"type": "server", "timestamp": "2019-11-21T13:11:11,709Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "version[7.4.1], pid[1], build[default/docker/fc0eeb6e2c25915d63d871d344e3d0b45ea0ea1e/2019-10-22T17:16:35.176724Z], OS[Linux/4.19.0-0.bpo.6-amd64/amd64], JVM[AdoptOpenJDK/OpenJDK 64-Bit Server VM/13/13+33]" }
{"type": "server", "timestamp": "2019-11-21T13:11:11,714Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "JVM home [/usr/share/elasticsearch/jdk]" }
{"type": "server", "timestamp": "2019-11-21T13:11:11,714Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "JVM arguments [-Xms1g, -Xmx1g, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -Des.networkaddress.cache.ttl=60, -Des.networkaddress.cache.negative.ttl=10, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dio.netty.allocator.numDirectArenas=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Djava.io.tmpdir=/tmp/elasticsearch-2210672167454109916, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -Djava.locale.providers=COMPAT, -Des.cgroups.hierarchy.override=/, -Xmx1g, -Xms1g, -Dio.netty.allocator.type=unpooled, -XX:MaxDirectMemorySize=536870912, -Des.path.home=/usr/share/elasticsearch, -Des.path.conf=/usr/share/elasticsearch/config, -Des.distribution.flavor=default, -Des.distribution.type=docker, -Des.bundled_jdk=true]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,498Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [aggs-matrix-stats]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,498Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [analysis-common]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,501Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [data-frame]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,501Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [flattened]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,501Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [frozen-indices]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,501Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [ingest-common]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,506Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [ingest-geoip]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,507Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [ingest-user-agent]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,507Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [lang-expression]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,508Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [lang-mustache]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,508Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [lang-painless]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,508Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [mapper-extras]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,509Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [parent-join]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,510Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [percolator]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,510Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [rank-eval]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,511Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [reindex]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,512Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [repository-url]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,512Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [search-business-rules]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,513Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [spatial]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,514Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [transport-netty4]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,516Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [vectors]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,517Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-analytics]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,517Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-ccr]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,517Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-core]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,518Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-deprecation]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,519Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-graph]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,519Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-ilm]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,519Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-logstash]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,519Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-ml]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,520Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-monitoring]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,520Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-rollup]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,520Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-security]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,521Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-sql]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,521Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-voting-only-node]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,521Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "loaded module [x-pack-watcher]" }
{"type": "server", "timestamp": "2019-11-21T13:11:17,522Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "no plugins loaded" }
{"type": "server", "timestamp": "2019-11-21T13:11:28,072Z", "level": "INFO", "component": "o.e.x.s.a.s.FileRolesStore", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "parsed [0] roles from file [/usr/share/elasticsearch/config/roles.yml]" }
{"type": "server", "timestamp": "2019-11-21T13:11:29,521Z", "level": "INFO", "component": "o.e.x.m.p.l.CppLogMessageHandler", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "[controller/86] [Main.cc@110] controller (64 bit): Version 7.4.1 (Build 973380bdacc5e8) Copyright (c) 2019 Elasticsearch BV" }
{"type": "server", "timestamp": "2019-11-21T13:11:31,117Z", "level": "DEBUG", "component": "o.e.a.ActionModule", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "Using REST wrapper from plugin org.elasticsearch.xpack.security.Security" }
{"type": "server", "timestamp": "2019-11-21T13:11:31,898Z", "level": "INFO", "component": "o.e.d.DiscoveryModule", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "using discovery type [zen] and seed hosts providers [settings]" }
{"type": "server", "timestamp": "2019-11-21T13:11:34,405Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "initialized" }
{"type": "server", "timestamp": "2019-11-21T13:11:34,405Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "starting ..." }
{"type": "server", "timestamp": "2019-11-21T13:11:34,733Z", "level": "INFO", "component": "o.e.t.TransportService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "publish_address {10.244.3.26:9300}, bound_addresses {[::]:9300}" }
{"type": "server", "timestamp": "2019-11-21T13:11:34,799Z", "level": "INFO", "component": "o.e.b.BootstrapChecks", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "bound or publishing to a non-loopback address, enforcing bootstrap checks" }
{"type": "server", "timestamp": "2019-11-21T13:11:43,043Z", "level": "INFO", "component": "o.e.c.c.Coordinator", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "setting initial configuration to VotingConfiguration{{bootstrap-placeholder}-elasticsearch-master-1,C_nHx1VvTJKaPcAzOWY7AQ,ZJSi-XhYS6q4yl847ER_sQ}" }
{"type": "server", "timestamp": "2019-11-21T13:11:43,302Z", "level": "INFO", "component": "o.e.c.s.MasterService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "elected-as-master ([2] nodes joined)[{elasticsearch-master-0}{C_nHx1VvTJKaPcAzOWY7AQ}{h0vJDEjxT3y0U_hHGpcD-A}{10.244.3.26}{10.244.3.26:9300}{dilm}{ml.machine_memory=1999998976, xpack.installed=true, ml.max_open_jobs=20} elect leader, {elasticsearch-master-2}{ZJSi-XhYS6q4yl847ER_sQ}{lfakg92cTGqF0ZY62V1nZA}{10.244.4.209}{10.244.4.209:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true} elect leader, _BECOME_MASTER_TASK_, _FINISH_ELECTION_], term: 1, version: 1, reason: master node changed {previous [], current [{elasticsearch-master-0}{C_nHx1VvTJKaPcAzOWY7AQ}{h0vJDEjxT3y0U_hHGpcD-A}{10.244.3.26}{10.244.3.26:9300}{dilm}{ml.machine_memory=1999998976, xpack.installed=true, ml.max_open_jobs=20}]}, added {{elasticsearch-master-2}{ZJSi-XhYS6q4yl847ER_sQ}{lfakg92cTGqF0ZY62V1nZA}{10.244.4.209}{10.244.4.209:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true},}" }
{"type": "server", "timestamp": "2019-11-21T13:11:43,498Z", "level": "INFO", "component": "o.e.c.c.CoordinationState", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "cluster UUID set to [k4zx5tmdRdik8ovYRkiEtA]" }
{"type": "server", "timestamp": "2019-11-21T13:11:43,727Z", "level": "INFO", "component": "o.e.c.s.ClusterApplierService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "master node changed {previous [], current [{elasticsearch-master-0}{C_nHx1VvTJKaPcAzOWY7AQ}{h0vJDEjxT3y0U_hHGpcD-A}{10.244.3.26}{10.244.3.26:9300}{dilm}{ml.machine_memory=1999998976, xpack.installed=true, ml.max_open_jobs=20}]}, added {{elasticsearch-master-2}{ZJSi-XhYS6q4yl847ER_sQ}{lfakg92cTGqF0ZY62V1nZA}{10.244.4.209}{10.244.4.209:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true},}, term: 1, version: 1, reason: Publication{term=1, version=1}" }
{"type": "server", "timestamp": "2019-11-21T13:11:43,899Z", "level": "INFO", "component": "o.e.h.AbstractHttpServerTransport", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "publish_address {10.244.3.26:9200}, bound_addresses {[::]:9200}", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:43,900Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "started", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:44,240Z", "level": "INFO", "component": "o.e.g.GatewayService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "recovered [0] indices into cluster_state", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:45,228Z", "level": "INFO", "component": "o.e.c.m.MetaDataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding template [.triggered_watches] for index patterns [.triggered_watches*]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:45,431Z", "level": "INFO", "component": "o.e.c.m.MetaDataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding template [.watch-history-10] for index patterns [.watcher-history-10*]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:45,633Z", "level": "INFO", "component": "o.e.c.m.MetaDataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding template [.watches] for index patterns [.watches*]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:45,847Z", "level": "INFO", "component": "o.e.c.m.MetaDataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding template [.slm-history] for index patterns [.slm-history-1*]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:45,950Z", "level": "INFO", "component": "o.e.c.m.MetaDataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding template [.monitoring-logstash] for index patterns [.monitoring-logstash-7-*]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:46,159Z", "level": "INFO", "component": "o.e.c.m.MetaDataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding template [.monitoring-es] for index patterns [.monitoring-es-7-*]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:46,313Z", "level": "INFO", "component": "o.e.c.m.MetaDataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding template [.monitoring-beats] for index patterns [.monitoring-beats-7-*]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:46,433Z", "level": "INFO", "component": "o.e.c.m.MetaDataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding template [.monitoring-alerts-7] for index patterns [.monitoring-alerts-7]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:46,644Z", "level": "INFO", "component": "o.e.c.m.MetaDataIndexTemplateService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding template [.monitoring-kibana] for index patterns [.monitoring-kibana-7-*]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:46,896Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding index lifecycle policy [watch-history-ilm-policy]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:47,101Z", "level": "INFO", "component": "o.e.x.i.a.TransportPutLifecycleAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "adding index lifecycle policy [slm-history-ilm-policy]", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:47,795Z", "level": "INFO", "component": "o.e.l.LicenseService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "license [352fec6c-081d-4309-b260-236597a84d73] mode [basic] - valid", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:47,797Z", "level": "INFO", "component": "o.e.x.s.s.SecurityStatusChangeListener", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "Active license is now [BASIC]; Security is disabled", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:50,952Z", "level": "INFO", "component": "o.e.c.s.MasterService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "node-join[{elasticsearch-master-1}{RqtlG7ozSiO_RC18-cEVEg}{IM5CBtNmSgaN910OBsS5fQ}{10.244.5.194}{10.244.5.194:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true} join existing leader], term: 1, version: 18, reason: added {{elasticsearch-master-1}{RqtlG7ozSiO_RC18-cEVEg}{IM5CBtNmSgaN910OBsS5fQ}{10.244.5.194}{10.244.5.194:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true},}", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
{"type": "server", "timestamp": "2019-11-21T13:11:52,445Z", "level": "INFO", "component": "o.e.c.s.ClusterApplierService", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "added {{elasticsearch-master-1}{RqtlG7ozSiO_RC18-cEVEg}{IM5CBtNmSgaN910OBsS5fQ}{10.244.5.194}{10.244.5.194:9300}{dilm}{ml.machine_memory=1999998976, ml.max_open_jobs=20, xpack.installed=true},}, term: 1, version: 18, reason: Publication{term=1, version=18}", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }

As you can see, the probe failed :)

fatmcgav commented 4 years ago

@holms Cheers for the info...

As you can see, the probe failed :)

Yeah, the readiness probe did fail a couple of times; however, that's to be expected whilst the cluster forms.

If you look at the elasticsearch container within the kubectl describe pod elasticsearch-master-0 output above, it shows Ready: True, which means the pod has started and passed the readiness probe check.

From the kubectl get events output, I'm not seeing any occurrences of elasticsearch pods being restarted... So I'm struggling to see an issue with that latest test...
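
For reference, those checks look like this; a quick sketch, assuming the chart's default elasticsearch-master naming and labels:

# Show the probe config, restart count and any recent probe failures for the pod
kubectl describe pod elasticsearch-master-0

# Restart counts at a glance across the StatefulSet
kubectl get pods -l app=elasticsearch-master

# Recent events, including any Unhealthy entries from failed readiness probes
kubectl get events --sort-by=.metadata.creationTimestamp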

Edit: To look at this a different way: what issue(s) did you encounter with the above test, @holms?

holms commented 4 years ago

@fatmcgav Hmm, well, alright; then it seems I've misunderstood something from the logs previously.

Currently I have this situation in the logs, and I haven't changed anything since I launched ES 6 days ago. Not sure if it's related; it would be nice to hear what you think. The container didn't restart, but the master node fails to be discovered..? So the probe is never green..?

"stacktrace": ["org.elasticsearch.cluster.block.ClusterBlockException: blocked by: [SERVICE_UNAVAILABLE/2/no master];",
"at org.elasticsearch.cluster.block.ClusterBlocks.globalBlockedException(ClusterBlocks.java:189) ~[elasticsearch-7.4.1.jar:7.4.1]",
"at org.elasticsearch.action.bulk.TransportBulkAction$BulkOperation.handleBlockExceptions(TransportBulkAction.java:476) [elasticsearch-7.4.1.jar:7.4.1]",
"at org.elasticsearch.action.bulk.TransportBulkAction$BulkOperation.doRun(TransportBulkAction.java:357) [elasticsearch-7.4.1.jar:7.4.1]",
"at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.4.1.jar:7.4.1]",
"at org.elasticsearch.action.bulk.TransportBulkAction$BulkOperation$2.onTimeout(TransportBulkAction.java:510) [elasticsearch-7.4.1.jar:7.4.1]",
"at org.elasticsearch.cluster.ClusterStateObserver$ContextPreservingListener.onTimeout(ClusterStateObserver.java:325) [elasticsearch-7.4.1.jar:7.4.1]",
"at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onTimeout(ClusterStateObserver.java:252) [elasticsearch-7.4.1.jar:7.4.1]",
"at org.elasticsearch.cluster.service.ClusterApplierService$NotifyTimeout.run(ClusterApplierService.java:598) [elasticsearch-7.4.1.jar:7.4.1]",
"at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:703) [elasticsearch-7.4.1.jar:7.4.1]",
"at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]",
"at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]",
"at java.lang.Thread.run(Thread.java:830) [?:?]",
"Suppressed: org.elasticsearch.discovery.MasterNotDiscoveredException",
"\tat org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction$2.onTimeout(TransportMasterNodeAction.java:214) ~[elasticsearch-7.4.1.jar:7.4.1]",
"\tat org.elasticsearch.cluster.ClusterStateObserver$ContextPreservingListener.onTimeout(ClusterStateObserver.java:325) [elasticsearch-7.4.1.jar:7.4.1]",
"\tat org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onTimeout(ClusterStateObserver.java:252) [elasticsearch-7.4.1.jar:7.4.1]",
"\tat org.elasticsearch.cluster.service.ClusterApplierService$NotifyTimeout.run(ClusterApplierService.java:598) [elasticsearch-7.4.1.jar:7.4.1]",
"\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:703) [elasticsearch-7.4.1.jar:7.4.1]",
"\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]",
"\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]",
"\tat java.lang.Thread.run(Thread.java:830) [?:?]"] }
{"type": "server", "timestamp": "2019-11-27T21:00:45,012Z", "level": "DEBUG", "component": "o.e.a.a.i.c.TransportCreateIndexAction", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "no known master node, scheduling a retry", "cluster.uuid": "k4zx5tmdRdik8ovYRkiEtA", "node.id": "C_nHx1VvTJKaPcAzOWY7AQ"  }
holms commented 4 years ago

@fatmcgav Can I ping you about the issue above, please :)? I still ended up adding that probe timeout, which resolved this problem.
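
For anyone landing here with the same symptom: the override described above maps to the chart's readinessProbe values and can be applied at upgrade time. A minimal sketch, with illustrative numbers and assumed release/chart names, not the exact values used here:

# Give the probe more headroom while the cluster forms
# (release name, chart ref and numbers are illustrative)
helm upgrade elasticsearch elastic/elasticsearch \
  --set readinessProbe.initialDelaySeconds=200 \
  --set readinessProbe.timeoutSeconds=10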

botelastic[bot] commented 4 years ago

This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.

botelastic[bot] commented 4 years ago

This issue has been automatically closed because it has not had recent activity since being marked as stale.