Closed: Hewei6765 closed this issue 2 years ago
[root@m1 ~]# kubectl describe elasticsearches.elasticsearch.k8s.elastic.co quickstart
Name: quickstart
Namespace: default
Labels: <none>
Annotations: common.k8s.elastic.co/controller-version: 1.5.0
elasticsearch.k8s.elastic.co/cluster-uuid: Ovtp-ccRSoefseYp51yhvQ
API Version: elasticsearch.k8s.elastic.co/v1
Kind: Elasticsearch
Metadata:
Creation Timestamp: 2021-04-22T05:49:55Z
Generation: 5
Managed Fields:
API Version: elasticsearch.k8s.elastic.co/v1
Fields Type: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
.:
f:kubectl.kubernetes.io/last-applied-configuration:
f:spec:
.:
f:http:
.:
f:tls:
.:
f:selfSignedCertificate:
.:
f:disabled:
f:version:
f:volumeClaimDeletePolicy:
Manager: kubectl
Operation: Update
Time: 2021-04-22T07:16:25Z
API Version: elasticsearch.k8s.elastic.co/v1
Fields Type: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
f:common.k8s.elastic.co/controller-version:
f:elasticsearch.k8s.elastic.co/cluster-uuid:
f:spec:
f:auth:
f:http:
f:service:
.:
f:metadata:
.:
f:creationTimestamp:
f:spec:
f:tls:
f:certificate:
f:nodeSets:
f:transport:
.:
f:service:
.:
f:metadata:
.:
f:creationTimestamp:
f:spec:
f:tls:
.:
f:certificate:
f:updateStrategy:
.:
f:changeBudget:
f:status:
.:
f:availableNodes:
f:health:
f:phase:
f:version:
Manager: elastic-operator
Operation: Update
Time: 2021-04-22T07:18:51Z
Resource Version: 15613808
Self Link: /apis/elasticsearch.k8s.elastic.co/v1/namespaces/default/elasticsearches/quickstart
UID: 8e33b22d-642e-4d7a-b904-4ee3f4f615a5
Spec:
Auth:
Http:
Service:
Metadata:
Creation Timestamp: <nil>
Spec:
Tls:
Certificate:
Self Signed Certificate:
Disabled: true
Node Sets:
Config:
node.store.allow_mmap: false
xpack.security.authc.realms.ldap.ldap1.bind_dn: cn=admin,dc=kubesphere,dc=io
xpack.security.authc.realms.ldap.ldap1.order: 0
xpack.security.authc.realms.ldap.ldap1.unmapped_groups_as_roles: false
xpack.security.authc.realms.ldap.ldap1.url: ldap://openldap.kubesphere-system.svc:389
xpack.security.authc.realms.ldap.ldap1.user_search.base_dn: ou=Users,dc=kubesphere,dc=io
xpack.security.authc.realms.ldap.ldap1.user_search.filter: (uid={0})
Count: 1
Name: default
Pod Template:
Spec:
Affinity:
Pod Anti Affinity:
Preferred During Scheduling Ignored During Execution:
Pod Affinity Term:
Label Selector:
Match Labels:
elasticsearch.k8s.elastic.co/cluster-name: quickstart
Topology Key: kubernetes.io/hostname
Weight: 100
Containers:
Env:
Name: TZ
Value: Asia/Shanghai
Name: elasticsearch
Resources:
Limits:
Memory: 8Gi
Requests:
Cpu: 1
Memory: 8Gi
Init Containers:
Command:
sh
-c
sysctl -w vm.max_map_count=262144
Name: sysctl
Security Context:
Privileged: true
Command:
sh
-c
echo admin | elasticsearch-keystore add xpack.security.authc.realms.ldap.ldap1.secure_bind_password -f
Name: ldap1-init
Security Context:
Privileged: true
Volume Claim Templates:
Metadata:
Name: quickstart
Spec:
Access Modes:
ReadWriteOnce
Resources:
Requests:
Storage: 10Gi
Storage Class Name: rook-cephfs
Transport:
Service:
Metadata:
Creation Timestamp: <nil>
Spec:
Tls:
Certificate:
Update Strategy:
Change Budget:
Version: 7.12.0
Volume Claim Delete Policy: DeleteOnScaledownOnly
Status:
Available Nodes: 1
Health: unknown
Phase: ApplyingChanges
Version: 7.12.0
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning Unexpected 66s (x2 over 2m36s) elasticsearch-controller Could not update cluster license: while getting current license level 503 Service Unavailable:
Warning Unexpected 66s (x2 over 2m36s) elasticsearch-controller Could not update remote clusters in Elasticsearch settings
It looks like node quickstart-es-default-1 has no data on disk, and cannot elect a master:
{"type": "server", "timestamp": "2021-04-22T11:58:38,797+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, {quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" }
Do you get similar logs from quickstart-es-default-0? Does that Pod exist?
I suspect you made some changes to the number of nodes, since I'm also seeing count: 1 in your manifest. Did you manually delete some Pods?
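If it helps, a minimal way to check that (default namespace, using the cluster-name label ECK puts on its resources):

# Do both Pods and their PVCs still exist?
kubectl get pods -l elasticsearch.k8s.elastic.co/cluster-name=quickstart
kubectl get pvc -l elasticsearch.k8s.elastic.co/cluster-name=quickstart
# Compare the logs of the other node with the ones quoted above
kubectl logs quickstart-es-default-0 --tail=50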
Hi there. I'm facing what looks like a similar issue with a cluster of 3 "master" nodes + 3 "data" (i.e. everything else) nodes. Because of another problem (a circuit breaker issue) the cluster health was red, and after trying to fix shards I very foolishly thought I could restart the masters by deleting the corresponding k8s Pods and that ECK would rebalance things. Now I also have the two error logs repeating over and over from the ECK operator, similar to OP:
[
{
"log.level": "error",
"@timestamp": "2021-06-10T09:07:26.060Z",
"log.logger": "driver",
"message": "Could not update remote clusters in Elasticsearch settings",
"service.version": "1.6.0+8326ca8a",
"service.type": "eck",
"ecs.version": "1.4.0",
"namespace": "elastic-elk-stack",
"es_name": "panther-elasticsearch",
"error": "503 Service Unavailable: ",
"error.stack_trace": "github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver.(*defaultDriver).Reconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver/driver.go:213\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch.(*ReconcileElasticsearch).internalReconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/elasticsearch_controller.go:286\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch.(*ReconcileElasticsearch).Reconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/elasticsearch_controller.go:195\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:298\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:253\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func1.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:216\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:155\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:156\nk8s.io/apimachinery/pkg/util/wait.JitterUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:133\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.UntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:99"
},
{
"log.level": "error",
"@timestamp": "2021-06-10T09:07:57.054Z",
"log.logger": "manager.eck-operator.controller.elasticsearch-controller",
"message": "Reconciler error",
"service.version": "1.6.0+8326ca8a",
"service.type": "eck",
"ecs.version": "1.4.0",
"name": "panther-elasticsearch",
"namespace": "elastic-elk-stack",
"error": "unable to delete /_cluster/voting_config_exclusions: 503 Service Unavailable: ",
"errorCauses": [
{
"error": "unable to delete /_cluster/voting_config_exclusions: 503 Service Unavailable: unknown",
"errorVerbose": "503 Service Unavailable: unknown\nunable to delete /_cluster/voting_config_exclusions\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/client.(*clientV7).DeleteVotingConfigExclusions\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/client/v7.go:67\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/version/zen2.ClearVotingConfigExclusions\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/version/zen2/voting_exclusions.go:71\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver.(*defaultDriver).reconcileNodeSpecs\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver/nodes.go:165\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver.(*defaultDriver).Reconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver/driver.go:248\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch.(*ReconcileElasticsearch).internalReconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/elasticsearch_controller.go:286\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch.(*ReconcileElasticsearch).Reconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/elasticsearch_controller.go:195\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:298\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:253\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func1.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:216\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:155\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:156\nk8s.io/apimachinery/pkg/util/wait.JitterUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:133\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.UntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:99\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1371"
}
],
"error.stack_trace": "sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:253\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func1.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:216\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:155\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:156\nk8s.io/apimachinery/pkg/util/wait.JitterUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:133\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.UntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:99"
}
]
Kubernetes has recreated the masters, but they seem unaware of each other:
{
"type": "server",
"timestamp": "2021-06-10T14:24:34,610Z",
"level": "WARN",
"component": "o.e.c.c.ClusterFormationFailureHelper",
"cluster.name": "panther-elasticsearch",
"node.name": "panther-elasticsearch-es-masters-1",
"message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{panther-elasticsearch-es-masters-1}{_rq8kaH9Rt2UpzfoeRRCSg}{K1rZcKEzQrCfcl3xwGtoLA}{10.240.1.37}{10.240.1.37:9300}{m}, {panther-elasticsearch-es-masters-0}{Zo1-xgQKSSKNa3CWWecJxQ}{zfgdUZ7bS8mRmgeK3TdmKA}{10.240.0.197}{10.240.0.197:9300}{m}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.240.0.197:9300] from hosts providers and [{panther-elasticsearch-es-masters-1}{_rq8kaH9Rt2UpzfoeRRCSg}{K1rZcKEzQrCfcl3xwGtoLA}{10.240.1.37}{10.240.1.37:9300}{m}] from last-known cluster state; node term 0, last-accepted version 0 in term 0"
}
@sebgl, you mentioned manual deletion of Pods; is there something that can be done to recover the cluster?
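One way to tell whether the recreated masters actually came back with their old data (the "node term 0, last-accepted version 0" above suggests empty data directories) is to look at the PVCs and the on-disk state directly; a rough diagnostic sketch, assuming the default Elasticsearch data path and that the cluster-name label is also present on the PVCs:

# Are the PVCs still there and bound to the recreated master Pods?
kubectl -n elastic-elk-stack get pods,pvc -l elasticsearch.k8s.elastic.co/cluster-name=panther-elasticsearch
# Is there persisted cluster state on disk? (default data path for a 7.x node)
kubectl -n elastic-elk-stack exec panther-elasticsearch-es-masters-0 -- ls /usr/share/elasticsearch/data/nodes/0/_state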
Hello,
I'm running into the same problem described above. In my case, the master nodes were rescheduled by Kubernetes and restarted. After they came back up, they were unable to join each other, despite having the same persistent volumes.
Elasticsearch version: 7.9.2, operator version: 1.6.0
Elasticsearch logs:
{
"type": "server",
"timestamp": "2021-07-29T22:47:48,446Z",
"level": "WARN",
"component": "o.e.c.c.ClusterFormationFailureHelper",
"cluster.name": "polaris-noticeable",
"node.name": "polaris-noticeable-es-master-nodes-0",
"message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{polaris-noticeable-es-master-nodes-0}{5zdLes6PSA6G2VWJIdC5eg}{RC39yS0hTDKWqkK2BJ1Ppw}{10.65.175.127}{10.65.175.127:9300}{m}{k8s_node_name=ip-10-65-146-212.ec2.internal, xpack.installed=true, transform.node=false}, {polaris-noticeable-es-master-nodes-1}{0OmdqXEfTZKwc5wM1Q2EXg}{l3Nzb9mTRtWBb4Rp7RqlWg}{10.65.116.59}{10.65.116.59:9300}{m}{k8s_node_name=ip-10-65-116-141.ec2.internal, xpack.installed=true, transform.node=false}, {polaris-noticeable-es-master-nodes-2}{qVmhnbk2QyqAqgzH-zU9bQ}{PALERrw2QdSvWoGInk2njg}{10.65.130.201}{10.65.130.201:9300}{m}{k8s_node_name=ip-10-65-135-160.ec2.internal, xpack.installed=true, transform.node=false}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.65.116.59:9300, 10.65.130.201:9300] from hosts providers and [{polaris-noticeable-es-master-nodes-0}{5zdLes6PSA6G2VWJIdC5eg}{RC39yS0hTDKWqkK2BJ1Ppw}{10.65.175.127}{10.65.175.127:9300}{m}{k8s_node_name=ip-10-65-146-212.ec2.internal, xpack.installed=true, transform.node=false}] from last-known cluster state; node term 0, last-accepted version 0 in term 0"
}
Operator logs:
{
"log.level": "error",
"@timestamp": "2021-07-29T22:30:45.831Z",
"log.logger": "driver",
"message": "Could not update remote clusters in Elasticsearch settings",
"service.version": "1.6.0+8326ca8a",
"service.type": "eck",
"ecs.version": "1.4.0",
"namespace": "elastic-system",
"es_name": "polaris-noticeable",
"error": "503 Service Unavailable: ",
"error.stack_trace": "github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver.(*defaultDriver).Reconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver/driver.go:213\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch.(*ReconcileElasticsearch).internalReconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/elasticsearch_controller.go:286\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch.(*ReconcileElasticsearch).Reconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/elasticsearch_controller.go:195\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:298\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:253\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func1.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:216\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:155\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:156\nk8s.io/apimachinery/pkg/util/wait.JitterUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:133\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.UntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:99"
}
{
"log.level": "error",
"@timestamp": "2021-07-29T22:29:45.572Z",
"log.logger": "manager.eck-operator.controller.elasticsearch-controller",
"message": "Reconciler error",
"service.version": "1.6.0+8326ca8a",
"service.type": "eck",
"ecs.version": "1.4.0",
"name": "polaris-noticeable",
"namespace": "elastic-system",
"error": "unable to delete /_cluster/voting_config_exclusions: 503 Service Unavailable: ",
"errorCauses": [
{
"error": "unable to delete /_cluster/voting_config_exclusions: 503 Service Unavailable: unknown",
"errorVerbose": "503 Service Unavailable: unknown\nunable to delete /_cluster/voting_config_exclusions\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/client.(*clientV7).DeleteVotingConfigExclusions\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/client/v7.go:67\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/version/zen2.ClearVotingConfigExclusions\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/version/zen2/voting_exclusions.go:71\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver.(*defaultDriver).reconcileNodeSpecs\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver/nodes.go:165\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver.(*defaultDriver).Reconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/driver/driver.go:248\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch.(*ReconcileElasticsearch).internalReconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/elasticsearch_controller.go:286\ngithub.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch.(*ReconcileElasticsearch).Reconcile\n\t/go/src/github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/elasticsearch_controller.go:195\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:298\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:253\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func1.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:216\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:155\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:156\nk8s.io/apimachinery/pkg/util/wait.JitterUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:133\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.UntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:99\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1371"
}
],
"error.stack_trace": "sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:253\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func1.2\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.8.3/pkg/internal/controller/controller.go:216\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil.func1\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:155\nk8s.io/apimachinery/pkg/util/wait.BackoffUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:156\nk8s.io/apimachinery/pkg/util/wait.JitterUntil\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:133\nk8s.io/apimachinery/pkg/util/wait.JitterUntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:185\nk8s.io/apimachinery/pkg/util/wait.UntilWithContext\n\t/go/pkg/mod/k8s.io/apimachinery@v0.20.2/pkg/util/wait/wait.go:99"
}
@Marchelune, did you ever figure out a solution?
I have the same error; any update?
@bkbwese Unfortunately not. After trying a few things I deleted the cluster and recreated it. I had snapshots to restore the data from, so it was simpler to redeploy than to figure out how to get out of this state.
^ That's exactly what I ended up doing. It's unfortunate that there isn't a fix for this yet. Thanks for the response.
@Marchelune Thanks, recreating the cluster worked for me.
I think the issues here might have been caused by accidental data volume misconfiguration leading to the symptoms described here. We have since added validations in https://github.com/elastic/cloud-on-k8s/pull/4526 and improved our documentation around this. I am closing this for now; reopen if needed.
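For anyone landing here later: if I read the linked validation correctly, the misconfiguration it guards against is a volumeClaimTemplates entry whose name does not match the elasticsearch-data volume that ECK mounts at the data path; in that case the data directory may not be backed by the PVC at all, and a Pod restart loses the local cluster state, which matches the "no data on disk" symptom. The describe output at the top of this issue shows a claim template named quickstart, which fits that pattern. A rough sketch of the expected shape, reusing the values from that manifest (intended for a fresh deployment; an existing nodeSet's claim templates generally cannot be renamed in place):

# Hypothetical corrected manifest: the claim template is named elasticsearch-data
# so that it backs /usr/share/elasticsearch/data
cat <<'EOF' | kubectl apply -f -
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: quickstart
spec:
  version: 7.12.0
  nodeSets:
  - name: default
    count: 1
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
        storageClassName: rook-cephfs
EOF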
I'm trying to use the enterprise_trial features by adding the config described in https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-licensing.html#k8s-start-trial. I get an error after updating the Elasticsearch configuration.
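The trial described on that page is enabled by creating a Secret in the namespace the operator runs in and accepting the EULA via an annotation; roughly as in the docs' example (secret name and namespace taken from there):

cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: eck-trial-license
  namespace: elastic-system
  labels:
    license.k8s.elastic.co/type: enterprise_trial
  annotations:
    elastic.co/eula: accepted
EOF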
Environment
ECK version: 1.5.0
Logs:
[root@m1 ~]# kubectl logs quickstart-es-default-1 --tail=50 "at java.lang.Thread.run(Thread.java:832) [?:?]"] } {"type": "server", "timestamp": "2021-04-22T11:57:48,793+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, {quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" } {"type": "server", "timestamp": "2021-04-22T11:57:58,794+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, {quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" } {"type": "server", "timestamp": "2021-04-22T11:57:59,574+08:00", "level": "WARN", "component": "r.suppressed", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "path: /_cluster/health, params: {}", "stacktrace": ["org.elasticsearch.discovery.MasterNotDiscoveredException: null", "at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction$2.onTimeout(TransportMasterNodeAction.java:219) [elasticsearch-7.12.0.jar:7.12.0]", "at 
org.elasticsearch.cluster.ClusterStateObserver$ContextPreservingListener.onTimeout(ClusterStateObserver.java:324) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onTimeout(ClusterStateObserver.java:241) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.service.ClusterApplierService$NotifyTimeout.run(ClusterApplierService.java:590) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:673) [elasticsearch-7.12.0.jar:7.12.0]", "at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130) [?:?]", "at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630) [?:?]", "at java.lang.Thread.run(Thread.java:832) [?:?]"] } {"type": "server", "timestamp": "2021-04-22T11:58:08,795+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, {quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" } {"type": "server", "timestamp": "2021-04-22T11:58:18,796+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, {quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, 
transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" } {"type": "server", "timestamp": "2021-04-22T11:58:19,576+08:00", "level": "WARN", "component": "r.suppressed", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "path: /_cluster/health, params: {}", "stacktrace": ["org.elasticsearch.discovery.MasterNotDiscoveredException: null", "at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction$2.onTimeout(TransportMasterNodeAction.java:219) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.ClusterStateObserver$ContextPreservingListener.onTimeout(ClusterStateObserver.java:324) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onTimeout(ClusterStateObserver.java:241) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.service.ClusterApplierService$NotifyTimeout.run(ClusterApplierService.java:590) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:673) [elasticsearch-7.12.0.jar:7.12.0]", "at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130) [?:?]", "at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630) [?:?]", "at java.lang.Thread.run(Thread.java:832) [?:?]"] } {"type": "server", "timestamp": "2021-04-22T11:58:28,797+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, {quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" } {"type": "server", "timestamp": "2021-04-22T11:58:38,797+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, 
{quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" } {"type": "server", "timestamp": "2021-04-22T11:58:39,577+08:00", "level": "WARN", "component": "r.suppressed", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "path: /_cluster/health, params: {}", "stacktrace": ["org.elasticsearch.discovery.MasterNotDiscoveredException: null", "at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction$2.onTimeout(TransportMasterNodeAction.java:219) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.ClusterStateObserver$ContextPreservingListener.onTimeout(ClusterStateObserver.java:324) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onTimeout(ClusterStateObserver.java:241) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.service.ClusterApplierService$NotifyTimeout.run(ClusterApplierService.java:590) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:673) [elasticsearch-7.12.0.jar:7.12.0]", "at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130) [?:?]", "at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630) [?:?]", "at java.lang.Thread.run(Thread.java:832) [?:?]"] } {"type": "server", "timestamp": "2021-04-22T11:58:48,798+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, {quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" } {"type": "server", 
"timestamp": "2021-04-22T11:58:58,799+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, {quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" } {"type": "server", "timestamp": "2021-04-22T11:58:59,578+08:00", "level": "WARN", "component": "r.suppressed", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "path: /_cluster/health, params: {}", "stacktrace": ["org.elasticsearch.discovery.MasterNotDiscoveredException: null", "at org.elasticsearch.action.support.master.TransportMasterNodeAction$AsyncSingleAction$2.onTimeout(TransportMasterNodeAction.java:219) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.ClusterStateObserver$ContextPreservingListener.onTimeout(ClusterStateObserver.java:324) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onTimeout(ClusterStateObserver.java:241) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.cluster.service.ClusterApplierService$NotifyTimeout.run(ClusterApplierService.java:590) [elasticsearch-7.12.0.jar:7.12.0]", "at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:673) [elasticsearch-7.12.0.jar:7.12.0]", "at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130) [?:?]", "at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630) [?:?]", "at java.lang.Thread.run(Thread.java:832) [?:?]"] } {"type": "server", "timestamp": "2021-04-22T11:59:08,800+08:00", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "quickstart", "node.name": "quickstart-es-default-1", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and [cluster.initial_master_nodes] is empty on this node: have discovered [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}, {quickstart-es-default-0}{wb4bnltKQ46sFknBXOidTw}{eyIkCtBYSKyomykTjQ2vew}{10.233.72.83}{10.233.72.83:9300}{cdfhilmrstw}{k8s_node_name=n3, ml.machine_memory=8589934592, 
ml.max_open_jobs=20, xpack.installed=true, ml.max_jvm_size=4294967296, transform.node=true}]; discovery will continue using [127.0.0.1:9300, 127.0.0.1:9301, 127.0.0.1:9302, 127.0.0.1:9303, 127.0.0.1:9304, 127.0.0.1:9305, 10.233.72.83:9300] from hosts providers and [{quickstart-es-default-1}{Tmr9zzDZTpm1nNfxJfL7qw}{dfzKfCnZRqi0Zg_JjnwEvg}{10.233.113.65}{10.233.113.65:9300}{cdfhilmrstw}{k8s_node_name=m3, ml.machine_memory=8589934592, xpack.installed=true, transform.node=true, ml.max_open_jobs=20, ml.max_jvm_size=4294967296}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" }