strimzi / strimzi-kafka-operator

Apache Kafka® running on Kubernetes
https://strimzi.io/
Apache License 2.0

Own Kafka listener certificates - SSL handshake error #3815

Closed · cloudcafetech closed this issue 4 years ago

cloudcafetech commented 4 years ago

Trying to set up my own CA as well as my own Kafka listener certificates, but getting an SSL handshake error.

The scenario is a little complex, and I am not sure what is missing. Please help.

Please find the following details:

cat <<EOF > req.conf
[ req ]
default_bits = 4096
default_keyfile = extlis.key
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no
[ req_distinguished_name ]
C = IN
ST = WB
L = Kolkata
O = CloudCafe
OU = ITDivision
CN = *.streaming.svc.cluster.local
[v3_ca]
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer:always
basicConstraints = CA:true
[v3_req]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.prod-cluster-kafka-brokers
DNS.2 = *.prod-cluster-kafka-brokers.streaming.svc
DNS.3 = prod-cluster-kafka-bootstrap
DNS.4 = prod-cluster-kafka-bootstrap.streaming.svc
DNS.5 = streaming.10.128.0.24.nip.io
DNS.6 = prod-cluster-bootstrap-streaming.10.128.0.24.nip.io
DNS.7 = prod-cluster-broker-0-streaming.10.128.0.24.nip.io
DNS.8 = prod-cluster-broker-1-streaming.10.128.0.24.nip.io
DNS.9 = prod-cluster-broker-2-streaming.10.128.0.24.nip.io
DNS.10 = prod-cluster-broker-3-streaming.10.128.0.24.nip.io
DNS.11 = localhost
DNS.12 = *.streaming.svc.cluster.local
EOF

openssl genrsa -out ca-extlis.key 4096
openssl req -x509 -new -nodes -key ca-extlis.key -days 3650 -config req.conf -out ca-extlis.pem
openssl genrsa -out extlis.key 4096
openssl req -new -key extlis.key -out extlis.csr -config req.conf
openssl x509 -req -in extlis.csr -CA ca-extlis.pem -CAkey ca-extlis.key -CAcreateserial -out extlis.crt -days 365
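
Note: as far as I understand, openssl x509 -req does not copy the v3_req extensions (including subjectAltName) from the CSR unless -extfile/-extensions are passed, so the signing step above may drop the SANs. The commands below are just what I would use to sign with the extensions applied and to verify what ended up in extlis.crt, not necessarily what I ran:

# sign the CSR and explicitly apply the v3_req extensions from req.conf
openssl x509 -req -in extlis.csr -CA ca-extlis.pem -CAkey ca-extlis.key -CAcreateserial \
  -out extlis.crt -days 365 -extfile req.conf -extensions v3_req

# confirm the SANs are actually present in the issued certificate
openssl x509 -in extlis.crt -noout -text | grep -A1 "Subject Alternative Name"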

openssl genrsa -out ca.key 2048
openssl req -new -x509 -days 3650 -key ca.key -out ca.crt -subj "/C=IN/ST=WB/L=Kolkata/O=CloudCafe/CN=cluster.local"
kubectl create secret generic prod-cluster-cluster-ca-cert --from-file=ca.crt=ca.crt 
kubectl create secret generic prod-cluster-cluster-ca --from-file=ca.key=ca.key 
kubectl label secret prod-cluster-cluster-ca-cert strimzi.io/kind=Kafka strimzi.io/cluster=prod-cluster 
kubectl label secret prod-cluster-cluster-ca strimzi.io/kind=Kafka strimzi.io/cluster=prod-cluster 

kubectl create secret generic extlis --from-file=extlis.key --from-file=extlis.crt
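
To sanity-check what actually landed in the secrets (subjects, issuers, expiry), something like this should work:

# inspect the custom cluster CA certificate stored in the Strimzi CA secret
kubectl get secret prod-cluster-cluster-ca-cert -o jsonpath='{.data.ca\.crt}' \
  | base64 -d | openssl x509 -noout -subject -issuer -enddate

# inspect the custom listener certificate stored in the extlis secret
kubectl get secret extlis -o jsonpath='{.data.extlis\.crt}' \
  | base64 -d | openssl x509 -noout -subject -issuer -enddate
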
apiVersion: kafka.strimzi.io/v1beta1
kind: Kafka
metadata:
  name: prod-cluster
spec:
  clusterCa:
    generateCertificateAuthority: false
  kafka:
    version: 2.5.0
    replicas: 1
    listeners:
      plain: {}
      tls:
        authentication:
          type: tls
      external:
        type: ingress
        authentication:
          type: scram-sha-512
        configuration:
          brokerCertChainAndKey:
            secretName: extlis
            certificate: extlis.crt
            key: extlis.key
          bootstrap:
            host: prod-cluster-bootstrap-streaming.10.128.0.24.nip.io
          brokers:
          - broker: 0
            host: prod-cluster-broker-0-streaming.10.128.0.24.nip.io
    authorization:
      type: simple
      superUsers:
        - CN=admin-in
        - admin-ex
    config:
      offsets.topic.replication.factor: 1
      transaction.state.log.replication.factor: 1
      transaction.state.log.min.isr: 1
      log.message.format.version: "2.5"
    storage:
      type: ephemeral
  zookeeper:
    replicas: 3
    storage:
      type: ephemeral
  entityOperator:
    topicOperator: {}
    userOperator: {}
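
After applying the Kafka resource above, I would expect to be able to confirm that the operator picked up the custom listener certificate; roughly like this (the exact status fields depend on the Strimzi version, so treat this as a sketch):

# the status section should show the cluster as Ready and list the listener addresses
kubectl get kafka prod-cluster -o yaml

# broker pods carry the strimzi.io/cluster label and should have rolled after the cert change
kubectl get pods -l strimzi.io/cluster=prod-cluster
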
apiVersion: kafka.strimzi.io/v1beta1
kind: KafkaUser
metadata:
  name: admin-ex
  labels:
    strimzi.io/cluster: prod-cluster
spec:
  authentication:
    type: scram-sha-512
---
apiVersion: kafka.strimzi.io/v1beta1
kind: KafkaUser
metadata:
  name: admin-in
  labels:
    strimzi.io/cluster: prod-cluster
spec:
  authentication:
    type: tls
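
Before pulling credentials out of the secrets, I would normally check that the User Operator has reconciled both users and created a secret for each (just a quick verification step, names as defined above):

# both KafkaUsers should be reconciled, and each should have a matching secret with its credentials
kubectl get kafkauser admin-in admin-ex
kubectl get secret admin-in admin-ex
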
kubectl get secret extlis -o jsonpath='{.data.extlis\.crt}' | base64 -d > ca.crt
kubectl get secret admin-in -o jsonpath='{.data.user\.crt}' | base64 --decode > user.crt
kubectl get secret admin-in -o jsonpath='{.data.user\.key}' | base64 --decode > user.key
kubectl get secret admin-in -o jsonpath='{.data.user\.p12}' | base64 --decode > user.p12
kubectl get secret admin-in -o jsonpath='{.data.user\.password}' | base64 --decode > user.password
USERPASSWORD=`cat user.password`

keytool -importkeystore -deststorepass CMwUVpKRi0l1RUqhjauRJh0x5vyUrNZ2 -destkeystore keystore.jks -srckeystore user.p12 -srcstorepass $USERPASSWORD -srcstoretype PKCS12
keytool -import -trustcacerts -alias root -file ca.crt -keystore truststore.jks -storepass CMwUVpKRi0l1RUqhjauRJh0x5vyUrNZ2 -noprompt
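
To double-check that the stores were built correctly (a private-key entry for the admin-in user certificate in the keystore, and a trusted CA entry under alias "root" in the truststore), keytool can list them:

# keystore should contain a PrivateKeyEntry for the admin-in user certificate
keytool -list -keystore keystore.jks -storepass CMwUVpKRi0l1RUqhjauRJh0x5vyUrNZ2

# truststore should contain the trustedCertEntry imported under alias "root"
keytool -list -keystore truststore.jks -storepass CMwUVpKRi0l1RUqhjauRJh0x5vyUrNZ2
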
bootstrap.servers=prod-cluster-bootstrap-streaming.10.128.0.24.nip.io:443
security.protocol=SSL
ssl.truststore.password=CMwUVpKRi0l1RUqhjauRJh0x5vyUrNZ2
ssl.truststore.location=truststore.jks
ssl.keystore.location=keystore.jks
ssl.keystore.password=CMwUVpKRi0l1RUqhjauRJh0x5vyUrNZ2
ssl.key.password=rXX9yuDEyMYC
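
For reference, since the external listener in the Kafka resource above is declared with scram-sha-512 authentication (the broker exposes it as SASL_SSL on port 443 through the ingress), my understanding is that a client going through the ingress would normally use properties along these lines, with the SCRAM password taken from the admin-ex secret. This is only a sketch of what I think such a config looks like, not the exact file I used:

bootstrap.servers=prod-cluster-bootstrap-streaming.10.128.0.24.nip.io:443
security.protocol=SASL_SSL
sasl.mechanism=SCRAM-SHA-512
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="admin-ex" password="<password from the admin-ex secret>";
ssl.truststore.location=truststore.jks
ssl.truststore.password=CMwUVpKRi0l1RUqhjauRJh0x5vyUrNZ2
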
[root@k3s kafka]# oc get po,ing
Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress
NAME                                                READY   STATUS        RESTARTS   AGE
pod/akhq-ddcfcfd8b-lx78x                            1/1     Terminating   0          54s
pod/akhq-ddcfcfd8b-x8n29                            1/1     Running       0          18s
pod/kafka-acl-viewer-7f5b7d95bd-zjn8d               1/1     Running       3          26m
pod/prod-cluster-entity-operator-756bdcfc78-584g9   3/3     Running       0          50m
pod/prod-cluster-kafka-0                            2/2     Running       0          2m35s
pod/prod-cluster-zookeeper-0                        1/1     Running       0          51m
pod/prod-cluster-zookeeper-1                        1/1     Running       0          51m
pod/prod-cluster-zookeeper-2                        1/1     Running       0          51m
pod/strimzi-cluster-operator-68b6d59f74-chckb       1/1     Running       0          53m

NAME                                              CLASS    HOSTS                                                 ADDRESS     PORTS     AGE
ingress.extensions/kafka-acl-viewer               <none>   kafkaview-streaming.35.239.239.46.nip.io              localhost   80        26m
ingress.extensions/kafka-management               <none>   kafkamanage-streaming.35.239.239.46.nip.io                        80        19s
ingress.extensions/prod-cluster-kafka-0           <none>   prod-cluster-broker-0-streaming.10.128.0.24.nip.io    localhost   80, 443   51m
ingress.extensions/prod-cluster-kafka-bootstrap   <none>   prod-cluster-bootstrap-streaming.10.128.0.24.nip.io   localhost   80, 443   51m
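
One more check that can be run from outside the cluster is to look at which certificate the bootstrap ingress actually presents on port 443 (the SNI hostname has to match the ingress host). These commands are only a verification sketch using the hostnames from this setup:

# subject and issuer of the certificate presented by the external listener through the ingress
openssl s_client -connect prod-cluster-bootstrap-streaming.10.128.0.24.nip.io:443 \
  -servername prod-cluster-bootstrap-streaming.10.128.0.24.nip.io </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -issuer

# its SANs, which should cover the bootstrap and per-broker ingress hosts
openssl s_client -connect prod-cluster-bootstrap-streaming.10.128.0.24.nip.io:443 \
  -servername prod-cluster-bootstrap-streaming.10.128.0.24.nip.io </dev/null 2>/dev/null \
  | openssl x509 -noout -text | grep -A1 "Subject Alternative Name"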

[root@k3s kafka]# oc logs -f pod/prod-cluster-kafka-0 kafka
+ ./kafka_pre_start.sh
Waiting for the TLS sidecar to get ready
TLS sidecar is not ready yet, waiting for another 1 second
TLS sidecar should be ready
++ hostname
++ awk -F- '{print $NF}'
STRIMZI_BROKER_ID=0
+ STRIMZI_BROKER_ID=0
+ export STRIMZI_BROKER_ID
+ echo STRIMZI_BROKER_ID=0
+ export GC_LOG_ENABLED=false
+ GC_LOG_ENABLED=false
+ '[' -z '' ']'
+ export KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/opt/kafka/custom-config/log4j.properties
+ KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/opt/kafka/custom-config/log4j.properties
+ rm -f /var/opt/kafka/kafka-ready /var/opt/kafka/zk-connected
++ ls /opt/kafka/libs/kafka-agent.jar
+ KAFKA_OPTS=' -javaagent:/opt/kafka/libs/kafka-agent.jar=/var/opt/kafka/kafka-ready:/var/opt/kafka/zk-connected'
+ export KAFKA_OPTS
+ '[' '' = true ']'
+ '[' -n '' ']'
+ KAFKA_OPTS=' -javaagent:/opt/kafka/libs/kafka-agent.jar=/var/opt/kafka/kafka-ready:/var/opt/kafka/zk-connected '
+ '[' false = true ']'
+ export LOG_DIR=/opt/kafka
+ LOG_DIR=/opt/kafka
++ tr -dc _A-Z-a-z-0-9
++ head -c32
+ CERTS_STORE_PASSWORD=7hqt1D-cP58OSk9CRVhrYN3qSEbpd6Mb
+ export CERTS_STORE_PASSWORD
+ mkdir -p /tmp/kafka
+ ./kafka_tls_prepare_certificates.sh
Preparing truststore for replication listener
Adding /opt/kafka/cluster-ca-certs/ca.crt to truststore /tmp/kafka/cluster.truststore.p12 with alias ca
Certificate was added to keystore
Preparing truststore for replication listener is complete
Looking for the right CA
Found the right CA: /opt/kafka/cluster-ca-certs/ca.crt
Preparing keystore for replication and clienttls listener
Preparing keystore for replication and clienttls listener is complete
Preparing custom keystore for external listener
Preparing custom keystore for external listener is complete
Preparing truststore for clienttls listener
Adding /opt/kafka/client-ca-certs/ca.crt to truststore /tmp/kafka/clients.truststore.p12 with alias ca
Certificate was added to keystore
Preparing truststore for clienttls listener is complete
+ echo 'Starting Kafka with configuration:'
Starting Kafka with configuration:
+ ./kafka_config_generator.sh
+ sed -e 's/sasl.jaas.config=.*/sasl.jaas.config=[hidden]/g' -e 's/password=.*/password=[hidden]/g'
+ tee /tmp/strimzi.properties
##############################
##############################
# This file is automatically generated by the Strimzi Cluster Operator
# Any changes to this file will be ignored and overwritten!
##############################
##############################

##########
# Broker ID
##########
broker.id=0

##########
# Zookeeper
##########
zookeeper.connect=localhost:2181

##########
# Kafka message logs configuration
##########
log.dirs=/var/lib/kafka/data/kafka-log0

##########
# Replication listener
##########
listener.name.replication-9091.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.replication-9091.ssl.keystore.password=[hidden]
listener.name.replication-9091.ssl.keystore.type=PKCS12
listener.name.replication-9091.ssl.truststore.location=/tmp/kafka/cluster.truststore.p12
listener.name.replication-9091.ssl.truststore.password=[hidden]
listener.name.replication-9091.ssl.truststore.type=PKCS12
listener.name.replication-9091.ssl.client.auth=required

##########
# Plain listener
##########

##########
# TLS listener
##########
listener.name.tls-9093.ssl.client.auth=required
listener.name.tls-9093.ssl.truststore.location=/tmp/kafka/clients.truststore.p12
listener.name.tls-9093.ssl.truststore.password=[hidden]
listener.name.tls-9093.ssl.truststore.type=PKCS12

listener.name.tls-9093.ssl.keystore.location=/tmp/kafka/cluster.keystore.p12
listener.name.tls-9093.ssl.keystore.password=[hidden]
listener.name.tls-9093.ssl.keystore.type=PKCS12

##########
# External listener
##########
listener.name.external-9094.scram-sha-512.sasl.jaas.config=[hidden]
listener.name.external-9094.sasl.enabled.mechanisms=SCRAM-SHA-512

listener.name.external-9094.ssl.keystore.location=/tmp/kafka/custom-external-9094.keystore.p12
listener.name.external-9094.ssl.keystore.password=[hidden]
listener.name.external-9094.ssl.keystore.type=PKCS12

##########
# Common listener configuration
##########
listeners=REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092,TLS-9093://0.0.0.0:9093,EXTERNAL-9094://0.0.0.0:9094
advertised.listeners=REPLICATION-9091://prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9091,PLAIN-9092://prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9092,TLS-9093://prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9093,EXTERNAL-9094://prod-cluster-broker-0-streaming.10.128.0.24.nip.io:443
listener.security.protocol.map=REPLICATION-9091:SSL,PLAIN-9092:PLAINTEXT,TLS-9093:SSL,EXTERNAL-9094:SASL_SSL
inter.broker.listener.name=REPLICATION-9091
sasl.enabled.mechanisms=
ssl.secure.random.implementation=SHA1PRNG
ssl.endpoint.identification.algorithm=HTTPS

##########
# Authorization
##########
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
super.users=User:CN=prod-cluster-kafka,O=io.strimzi;User:CN=prod-cluster-entity-operator,O=io.strimzi;User:CN=prod-cluster-kafka-exporter,O=io.strimzi;User:CN=prod-cluster-cruise-control,O=io.strimzi;User:CN=cluster-operator,O=io.strimzi;User:CN=admin-in;User:admin-ex

##########
# User provided configuration
##########
log.message.format.version=2.5
offsets.topic.replication.factor=1
transaction.state.log.min.isr=1
+ echo ''
+ '[' -z -Xms128M ']'
+ . ./set_kafka_gc_options.sh
++ set -e
++ '[' false == true ']'
++ export 'KAFKA_GC_LOG_OPTS= '
++ KAFKA_GC_LOG_OPTS=' '
++ export GC_LOG_ENABLED=false
++ GC_LOG_ENABLED=false
+ exec /usr/bin/tini -w -e 143 -- /opt/kafka/bin/kafka-server-start.sh /tmp/strimzi.properties
transaction.state.log.replication.factor=1
2020-10-15 09:48:37,741 INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) [main]
2020-10-15 09:48:39,368 INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) [main]
2020-10-15 09:48:39,633 INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) [main]
2020-10-15 09:48:39,641 INFO starting (kafka.server.KafkaServer) [main]
2020-10-15 09:48:39,642 INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer) [main]
2020-10-15 09:48:39,682 INFO [ZooKeeperClient Kafka server] Initializing a new session to localhost:2181. (kafka.zookeeper.ZooKeeperClient) [main]
2020-10-15 09:48:39,696 INFO Client environment:zookeeper.version=3.5.7-f0fdd52973d373ffd9c86b81d99842dc2c7f660e, built on 02/10/2020 11:30 GMT (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,696 INFO Client environment:host.name=prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc.cluster.local (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,696 INFO Client environment:java.version=11.0.7 (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,696 INFO Client environment:java.vendor=Oracle Corporation (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,696 INFO Client environment:java.home=/usr/lib/jvm/java-11-openjdk-11.0.7.10-4.el7_8.x86_64 (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,696 INFO Client environment:java.class.path=/opt/kafka/bin/../libs/activation-1.1.1.jar:/opt/kafka/bin/../libs/annotations-13.0.jar:/opt/kafka/bin/../libs/aopalliance-repackaged-2.5.0.jar:/opt/kafka/bin/../libs/argparse4j-0.7.0.jar:/opt/kafka/bin/../libs/audience-annotations-0.5.0.jar:/opt/kafka/bin/../libs/bcpkix-jdk15on-1.62.jar:/opt/kafka/bin/../libs/bcprov-jdk15on-1.60.jar:/opt/kafka/bin/../libs/commons-cli-1.4.jar:/opt/kafka/bin/../libs/commons-lang-2.6.jar:/opt/kafka/bin/../libs/commons-lang3-3.8.1.jar:/opt/kafka/bin/../libs/connect-api-2.5.0.jar:/opt/kafka/bin/../libs/connect-basic-auth-extension-2.5.0.jar:/opt/kafka/bin/../libs/connect-file-2.5.0.jar:/opt/kafka/bin/../libs/connect-json-2.5.0.jar:/opt/kafka/bin/../libs/connect-mirror-2.5.0.jar:/opt/kafka/bin/../libs/connect-mirror-client-2.5.0.jar:/opt/kafka/bin/../libs/connect-runtime-2.5.0.jar:/opt/kafka/bin/../libs/connect-transforms-2.5.0.jar:/opt/kafka/bin/../libs/cruise-control-metrics-reporter-2.0.108.jar:/opt/kafka/bin/../libs/gson-2.8.6.jar:/opt/kafka/bin/../libs/guava-20.0.jar:/opt/kafka/bin/../libs/hk2-api-2.5.0.jar:/opt/kafka/bin/../libs/hk2-locator-2.5.0.jar:/opt/kafka/bin/../libs/hk2-utils-2.5.0.jar:/opt/kafka/bin/../libs/jackson-annotations-2.10.2.jar:/opt/kafka/bin/../libs/jackson-core-2.10.2.jar:/opt/kafka/bin/../libs/jackson-databind-2.10.2.jar:/opt/kafka/bin/../libs/jackson-dataformat-csv-2.10.2.jar:/opt/kafka/bin/../libs/jackson-datatype-jdk8-2.10.2.jar:/opt/kafka/bin/../libs/jackson-jaxrs-base-2.10.2.jar:/opt/kafka/bin/../libs/jackson-jaxrs-json-provider-2.10.2.jar:/opt/kafka/bin/../libs/jackson-module-jaxb-annotations-2.10.2.jar:/opt/kafka/bin/../libs/jackson-module-paranamer-2.10.2.jar:/opt/kafka/bin/../libs/jackson-module-scala_2.12-2.10.2.jar:/opt/kafka/bin/../libs/jaeger-client-1.1.0.jar:/opt/kafka/bin/../libs/jaeger-core-1.1.0.jar:/opt/kafka/bin/../libs/jaeger-thrift-1.1.0.jar:/opt/kafka/bin/../libs/jaeger-tracerresolver-1.1.0.jar:/opt/kafka/bin/../libs/jakarta.activation-api-1.2.1.jar:/opt/kafka/bin/../libs/jakarta.annotation-api-1.3.4.jar:/opt/kafka/bin/../libs/jakarta.inject-2.5.0.jar:/opt/kafka/bin/../libs/jakarta.ws.rs-api-2.1.5.jar:/opt/kafka/bin/../libs/jakarta.xml.bind-api-2.3.2.jar:/opt/kafka/bin/../libs/javassist-3.22.0-CR2.jar:/opt/kafka/bin/../libs/javassist-3.26.0-GA.jar:/opt/kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/opt/kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/opt/kafka/bin/../libs/jaxb-api-2.3.0.jar:/opt/kafka/bin/../libs/jersey-client-2.28.jar:/opt/kafka/bin/../libs/jersey-common-2.28.jar:/opt/kafka/bin/../libs/jersey-container-servlet-2.28.jar:/opt/kafka/bin/../libs/jersey-container-servlet-core-2.28.jar:/opt/kafka/bin/../libs/jersey-hk2-2.28.jar:/opt/kafka/bin/../libs/jersey-media-jaxb-2.28.jar:/opt/kafka/bin/../libs/jersey-server-2.28.jar:/opt/kafka/bin/../libs/jetty-client-9.4.24.v20191120.jar:/opt/kafka/bin/../libs/jetty-continuation-9.4.24.v20191120.jar:/opt/kafka/bin/../libs/jetty-http-9.4.24.v20191120.jar:/opt/kafka/bin/../libs/jetty-io-9.4.24.v20191120.jar:/opt/kafka/bin/../libs/jetty-security-9.4.24.v20191120.jar:/opt/kafka/bin/../libs/jetty-server-9.4.24.v20191120.jar:/opt/kafka/bin/../libs/jetty-servlet-9.4.24.v20191120.jar:/opt/kafka/bin/../libs/jetty-servlets-9.4.24.v20191120.jar:/opt/kafka/bin/../libs/jetty-util-9.4.24.v20191120.jar:/opt/kafka/bin/../libs/jmx_prometheus_javaagent-0.12.0.jar:/opt/kafka/bin/../libs/jopt-simple-5.0.4.jar:/opt/kafka/bin/../libs/json-smart-1.1.1.jar:/opt/kafka/bin/../libs/jsonevent-layout-1.7.jar:/opt/kafka/bin/../libs/
jsr305-3.0.2.jar:/opt/kafka/bin/../libs/kafka-agent.jar:/opt/kafka/bin/../libs/kafka-clients-2.5.0.jar:/opt/kafka/bin/../libs/kafka-log4j-appender-2.5.0.jar:/opt/kafka/bin/../libs/kafka-oauth-client-0.5.0.jar:/opt/kafka/bin/../libs/kafka-oauth-common-0.5.0.jar:/opt/kafka/bin/../libs/kafka-oauth-keycloak-authorizer-0.5.0.jar:/opt/kafka/bin/../libs/kafka-oauth-server-0.5.0.jar:/opt/kafka/bin/../libs/kafka-streams-2.5.0.jar:/opt/kafka/bin/../libs/kafka-streams-examples-2.5.0.jar:/opt/kafka/bin/../libs/kafka-streams-scala_2.12-2.5.0.jar:/opt/kafka/bin/../libs/kafka-streams-test-utils-2.5.0.jar:/opt/kafka/bin/../libs/kafka-tools-2.5.0.jar:/opt/kafka/bin/../libs/kafka_2.12-2.5.0-sources.jar:/opt/kafka/bin/../libs/kafka_2.12-2.5.0.jar:/opt/kafka/bin/../libs/keycloak-common-10.0.0.jar:/opt/kafka/bin/../libs/keycloak-core-10.0.0.jar:/opt/kafka/bin/../libs/kotlin-stdlib-1.3.50.jar:/opt/kafka/bin/../libs/kotlin-stdlib-common-1.3.50.jar:/opt/kafka/bin/../libs/libthrift-0.13.0.jar:/opt/kafka/bin/../libs/log4j-1.2.17.jar:/opt/kafka/bin/../libs/lz4-java-1.7.1.jar:/opt/kafka/bin/../libs/maven-artifact-3.6.3.jar:/opt/kafka/bin/../libs/metrics-core-2.2.0.jar:/opt/kafka/bin/../libs/mirror-maker-agent.jar:/opt/kafka/bin/../libs/netty-buffer-4.1.45.Final.jar:/opt/kafka/bin/../libs/netty-codec-4.1.45.Final.jar:/opt/kafka/bin/../libs/netty-common-4.1.45.Final.jar:/opt/kafka/bin/../libs/netty-handler-4.1.45.Final.jar:/opt/kafka/bin/../libs/netty-resolver-4.1.45.Final.jar:/opt/kafka/bin/../libs/netty-transport-4.1.45.Final.jar:/opt/kafka/bin/../libs/netty-transport-native-epoll-4.1.45.Final.jar:/opt/kafka/bin/../libs/netty-transport-native-unix-common-4.1.45.Final.jar:/opt/kafka/bin/../libs/okhttp-4.2.2.jar:/opt/kafka/bin/../libs/okio-2.2.2.jar:/opt/kafka/bin/../libs/opa-authorizer-0.4.1.jar:/opt/kafka/bin/../libs/opentracing-api-0.33.0.jar:/opt/kafka/bin/../libs/opentracing-kafka-client-0.1.12.jar:/opt/kafka/bin/../libs/opentracing-noop-0.33.0.jar:/opt/kafka/bin/../libs/opentracing-tracerresolver-0.1.8.jar:/opt/kafka/bin/../libs/opentracing-util-0.33.0.jar:/opt/kafka/bin/../libs/osgi-resource-locator-1.0.1.jar:/opt/kafka/bin/../libs/paranamer-2.8.jar:/opt/kafka/bin/../libs/plexus-utils-3.2.1.jar:/opt/kafka/bin/../libs/reflections-0.9.12.jar:/opt/kafka/bin/../libs/rocksdbjni-5.18.3.jar:/opt/kafka/bin/../libs/scala-collection-compat_2.12-2.1.3.jar:/opt/kafka/bin/../libs/scala-java8-compat_2.12-0.9.0.jar:/opt/kafka/bin/../libs/scala-library-2.12.10.jar:/opt/kafka/bin/../libs/scala-logging_2.12-3.9.2.jar:/opt/kafka/bin/../libs/scala-reflect-2.12.10.jar:/opt/kafka/bin/../libs/slf4j-api-1.7.30.jar:/opt/kafka/bin/../libs/slf4j-log4j12-1.7.30.jar:/opt/kafka/bin/../libs/snappy-java-1.1.7.3.jar:/opt/kafka/bin/../libs/tracing-agent.jar:/opt/kafka/bin/../libs/validation-api-2.0.1.Final.jar:/opt/kafka/bin/../libs/zookeeper-3.5.7.jar:/opt/kafka/bin/../libs/zookeeper-jute-3.5.7.jar:/opt/kafka/bin/../libs/zstd-jni-1.4.4-7.jar (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,697 INFO Client environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,697 INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,697 INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,697 INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,697 INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,697 INFO Client environment:os.version=3.10.0-1127.19.1.el7.x86_64 (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,697 INFO Client environment:user.name=kafka (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,697 INFO Client environment:user.home=/home/kafka (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,697 INFO Client environment:user.dir=/opt/kafka (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,698 INFO Client environment:os.memory.free=242MB (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,698 INFO Client environment:os.memory.max=1830MB (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,698 INFO Client environment:os.memory.total=260MB (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,702 INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@2462cb01 (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:39,710 INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket) [main]
2020-10-15 09:48:39,724 INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn) [main]
2020-10-15 09:48:39,726 INFO Starting poller (io.strimzi.kafka.agent.KafkaAgent) [main]
2020-10-15 09:48:39,734 INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) [main]
2020-10-15 09:48:39,744 INFO Opening socket connection to server localhost/0:0:0:0:0:0:0:1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) [main-SendThread(localhost:2181)]
2020-10-15 09:48:39,756 INFO Socket error occurred: localhost/0:0:0:0:0:0:0:1:2181: Connection refused (org.apache.zookeeper.ClientCnxn) [main-SendThread(localhost:2181)]
2020-10-15 09:48:40,859 INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) [main-SendThread(localhost:2181)]
2020-10-15 09:48:40,860 INFO Socket connection established, initiating session, client: /127.0.0.1:43500, server: localhost/127.0.0.1:2181 (org.apache.zookeeper.ClientCnxn) [main-SendThread(localhost:2181)]
2020-10-15 09:48:40,959 INFO Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x20000107f66000d, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn) [main-SendThread(localhost:2181)]
2020-10-15 09:48:40,968 INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient) [main]
2020-10-15 09:48:41,777 INFO Cluster ID = W9_E0VBTQnqVIIMYOYdsqQ (kafka.server.KafkaServer) [main]
2020-10-15 09:48:41,785 WARN No meta.properties file under dir /var/lib/kafka/data/kafka-log0/meta.properties (kafka.server.BrokerMetadataCheckpoint) [main]
2020-10-15 09:48:41,965 INFO KafkaConfig values:
        advertised.host.name = null
        advertised.listeners = REPLICATION-9091://prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9091,PLAIN-9092://prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9092,TLS-9093://prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9093,EXTERNAL-9094://prod-cluster-broker-0-streaming.10.128.0.24.nip.io:443
        advertised.port = null
        alter.config.policy.class.name = null
        alter.log.dirs.replication.quota.window.num = 11
        alter.log.dirs.replication.quota.window.size.seconds = 1
        authorizer.class.name = kafka.security.auth.SimpleAclAuthorizer
        auto.create.topics.enable = true
        auto.leader.rebalance.enable = true
        background.threads = 10
        broker.id = 0
        broker.id.generation.enable = true
        broker.rack = null
        client.quota.callback.class = null
        compression.type = producer
        connection.failed.authentication.delay.ms = 100
        connections.max.idle.ms = 600000
        connections.max.reauth.ms = 0
        control.plane.listener.name = null
        controlled.shutdown.enable = true
        controlled.shutdown.max.retries = 3
        controlled.shutdown.retry.backoff.ms = 5000
        controller.socket.timeout.ms = 30000
        create.topic.policy.class.name = null
        default.replication.factor = 1
        delegation.token.expiry.check.interval.ms = 3600000
        delegation.token.expiry.time.ms = 86400000
        delegation.token.master.key = null
        delegation.token.max.lifetime.ms = 604800000
        delete.records.purgatory.purge.interval.requests = 1
        delete.topic.enable = true
        fetch.max.bytes = 57671680
        fetch.purgatory.purge.interval.requests = 1000
        group.initial.rebalance.delay.ms = 3000
        group.max.session.timeout.ms = 1800000
        group.max.size = 2147483647
        group.min.session.timeout.ms = 6000
        host.name =
        inter.broker.listener.name = REPLICATION-9091
        inter.broker.protocol.version = 2.5-IV0
        kafka.metrics.polling.interval.secs = 10
        kafka.metrics.reporters = []
        leader.imbalance.check.interval.seconds = 300
        leader.imbalance.per.broker.percentage = 10
        listener.security.protocol.map = REPLICATION-9091:SSL,PLAIN-9092:PLAINTEXT,TLS-9093:SSL,EXTERNAL-9094:SASL_SSL
        listeners = REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092,TLS-9093://0.0.0.0:9093,EXTERNAL-9094://0.0.0.0:9094
        log.cleaner.backoff.ms = 15000
        log.cleaner.dedupe.buffer.size = 134217728
        log.cleaner.delete.retention.ms = 86400000
        log.cleaner.enable = true
        log.cleaner.io.buffer.load.factor = 0.9
        log.cleaner.io.buffer.size = 524288
        log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
        log.cleaner.max.compaction.lag.ms = 9223372036854775807
        log.cleaner.min.cleanable.ratio = 0.5
        log.cleaner.min.compaction.lag.ms = 0
        log.cleaner.threads = 1
        log.cleanup.policy = [delete]
        log.dir = /tmp/kafka-logs
        log.dirs = /var/lib/kafka/data/kafka-log0
        log.flush.interval.messages = 9223372036854775807
        log.flush.interval.ms = null
        log.flush.offset.checkpoint.interval.ms = 60000
        log.flush.scheduler.interval.ms = 9223372036854775807
        log.flush.start.offset.checkpoint.interval.ms = 60000
        log.index.interval.bytes = 4096
        log.index.size.max.bytes = 10485760
        log.message.downconversion.enable = true
        log.message.format.version = 2.5
        log.message.timestamp.difference.max.ms = 9223372036854775807
        log.message.timestamp.type = CreateTime
        log.preallocate = false
        log.retention.bytes = -1
        log.retention.check.interval.ms = 300000
        log.retention.hours = 168
        log.retention.minutes = null
        log.retention.ms = null
        log.roll.hours = 168
        log.roll.jitter.hours = 0
        log.roll.jitter.ms = null
        log.roll.ms = null
        log.segment.bytes = 1073741824
        log.segment.delete.delay.ms = 60000
        max.connections = 2147483647
        max.connections.per.ip = 2147483647
        max.connections.per.ip.overrides =
        max.incremental.fetch.session.cache.slots = 1000
        message.max.bytes = 1048588
        metric.reporters = []
        metrics.num.samples = 2
        metrics.recording.level = INFO
        metrics.sample.window.ms = 30000
        min.insync.replicas = 1
        num.io.threads = 8
        num.network.threads = 3
        num.partitions = 1
        num.recovery.threads.per.data.dir = 1
        num.replica.alter.log.dirs.threads = null
        num.replica.fetchers = 1
        offset.metadata.max.bytes = 4096
        offsets.commit.required.acks = -1
        offsets.commit.timeout.ms = 5000
        offsets.load.buffer.size = 5242880
        offsets.retention.check.interval.ms = 600000
        offsets.retention.minutes = 10080
        offsets.topic.compression.codec = 0
        offsets.topic.num.partitions = 50
        offsets.topic.replication.factor = 1
        offsets.topic.segment.bytes = 104857600
        password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
        password.encoder.iterations = 4096
        password.encoder.key.length = 128
        password.encoder.keyfactory.algorithm = null
        password.encoder.old.secret = null
        password.encoder.secret = null
        port = 9092
        principal.builder.class = null
        producer.purgatory.purge.interval.requests = 1000
        queued.max.request.bytes = -1
        queued.max.requests = 500
        quota.consumer.default = 9223372036854775807
        quota.producer.default = 9223372036854775807
        quota.window.num = 11
        quota.window.size.seconds = 1
        replica.fetch.backoff.ms = 1000
        replica.fetch.max.bytes = 1048576
        replica.fetch.min.bytes = 1
        replica.fetch.response.max.bytes = 10485760
        replica.fetch.wait.max.ms = 500
        replica.high.watermark.checkpoint.interval.ms = 5000
        replica.lag.time.max.ms = 30000
        replica.selector.class = null
        replica.socket.receive.buffer.bytes = 65536
        replica.socket.timeout.ms = 30000
        replication.quota.window.num = 11
        replication.quota.window.size.seconds = 1
        request.timeout.ms = 30000
        reserved.broker.max.id = 1000
        sasl.client.callback.handler.class = null
        sasl.enabled.mechanisms = []
        sasl.jaas.config = null
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.principal.to.local.rules = [DEFAULT]
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.login.callback.handler.class = null
        sasl.login.class = null
        sasl.login.refresh.buffer.seconds = 300
        sasl.login.refresh.min.period.seconds = 60
        sasl.login.refresh.window.factor = 0.8
        sasl.login.refresh.window.jitter = 0.05
        sasl.mechanism.inter.broker.protocol = GSSAPI
        sasl.server.callback.handler.class = null
        security.inter.broker.protocol = PLAINTEXT
        security.providers = null
        socket.receive.buffer.bytes = 102400
        socket.request.max.bytes = 104857600
        socket.send.buffer.bytes = 102400
        ssl.cipher.suites = []
        ssl.client.auth = none
        ssl.enabled.protocols = [TLSv1.2]
        ssl.endpoint.identification.algorithm = HTTPS
        ssl.key.password = null
        ssl.keymanager.algorithm = SunX509
        ssl.keystore.location = null
        ssl.keystore.password = null
        ssl.keystore.type = JKS
        ssl.principal.mapping.rules = DEFAULT
        ssl.protocol = TLSv1.2
        ssl.provider = null
        ssl.secure.random.implementation = SHA1PRNG
        ssl.trustmanager.algorithm = PKIX
        ssl.truststore.location = null
        ssl.truststore.password = null
        ssl.truststore.type = JKS
        transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
        transaction.max.timeout.ms = 900000
        transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
        transaction.state.log.load.buffer.size = 5242880
        transaction.state.log.min.isr = 1
        transaction.state.log.num.partitions = 50
        transaction.state.log.replication.factor = 1
        transaction.state.log.segment.bytes = 104857600
        transactional.id.expiration.ms = 604800000
        unclean.leader.election.enable = false
        zookeeper.clientCnxnSocket = null
        zookeeper.connect = localhost:2181
        zookeeper.connection.timeout.ms = null
        zookeeper.max.in.flight.requests = 10
        zookeeper.session.timeout.ms = 18000
        zookeeper.set.acl = false
        zookeeper.ssl.cipher.suites = null
        zookeeper.ssl.client.enable = false
        zookeeper.ssl.crl.enable = false
        zookeeper.ssl.enabled.protocols = null
        zookeeper.ssl.endpoint.identification.algorithm = HTTPS
        zookeeper.ssl.keystore.location = null
        zookeeper.ssl.keystore.password = null
        zookeeper.ssl.keystore.type = null
        zookeeper.ssl.ocsp.enable = false
        zookeeper.ssl.protocol = TLSv1.2
        zookeeper.ssl.truststore.location = null
        zookeeper.ssl.truststore.password = null
        zookeeper.ssl.truststore.type = null
        zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig) [main]
2020-10-15 09:48:41,996 INFO KafkaConfig values:
        advertised.host.name = null
        advertised.listeners = REPLICATION-9091://prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9091,PLAIN-9092://prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9092,TLS-9093://prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9093,EXTERNAL-9094://prod-cluster-broker-0-streaming.10.128.0.24.nip.io:443
        advertised.port = null
        alter.config.policy.class.name = null
        alter.log.dirs.replication.quota.window.num = 11
        alter.log.dirs.replication.quota.window.size.seconds = 1
        authorizer.class.name = kafka.security.auth.SimpleAclAuthorizer
        auto.create.topics.enable = true
        auto.leader.rebalance.enable = true
        background.threads = 10
        broker.id = 0
        broker.id.generation.enable = true
        broker.rack = null
        client.quota.callback.class = null
        compression.type = producer
        connection.failed.authentication.delay.ms = 100
        connections.max.idle.ms = 600000
        connections.max.reauth.ms = 0
        control.plane.listener.name = null
        controlled.shutdown.enable = true
        controlled.shutdown.max.retries = 3
        controlled.shutdown.retry.backoff.ms = 5000
        controller.socket.timeout.ms = 30000
        create.topic.policy.class.name = null
        default.replication.factor = 1
        delegation.token.expiry.check.interval.ms = 3600000
        delegation.token.expiry.time.ms = 86400000
        delegation.token.master.key = null
        delegation.token.max.lifetime.ms = 604800000
        delete.records.purgatory.purge.interval.requests = 1
        delete.topic.enable = true
        fetch.max.bytes = 57671680
        fetch.purgatory.purge.interval.requests = 1000
        group.initial.rebalance.delay.ms = 3000
        group.max.session.timeout.ms = 1800000
        group.max.size = 2147483647
        group.min.session.timeout.ms = 6000
        host.name =
        inter.broker.listener.name = REPLICATION-9091
        inter.broker.protocol.version = 2.5-IV0
        kafka.metrics.polling.interval.secs = 10
        kafka.metrics.reporters = []
        leader.imbalance.check.interval.seconds = 300
        leader.imbalance.per.broker.percentage = 10
        listener.security.protocol.map = REPLICATION-9091:SSL,PLAIN-9092:PLAINTEXT,TLS-9093:SSL,EXTERNAL-9094:SASL_SSL
        listeners = REPLICATION-9091://0.0.0.0:9091,PLAIN-9092://0.0.0.0:9092,TLS-9093://0.0.0.0:9093,EXTERNAL-9094://0.0.0.0:9094
        log.cleaner.backoff.ms = 15000
        log.cleaner.dedupe.buffer.size = 134217728
        log.cleaner.delete.retention.ms = 86400000
        log.cleaner.enable = true
        log.cleaner.io.buffer.load.factor = 0.9
        log.cleaner.io.buffer.size = 524288
        log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
        log.cleaner.max.compaction.lag.ms = 9223372036854775807
        log.cleaner.min.cleanable.ratio = 0.5
        log.cleaner.min.compaction.lag.ms = 0
        log.cleaner.threads = 1
        log.cleanup.policy = [delete]
        log.dir = /tmp/kafka-logs
        log.dirs = /var/lib/kafka/data/kafka-log0
        log.flush.interval.messages = 9223372036854775807
        log.flush.interval.ms = null
        log.flush.offset.checkpoint.interval.ms = 60000
        log.flush.scheduler.interval.ms = 9223372036854775807
        log.flush.start.offset.checkpoint.interval.ms = 60000
        log.index.interval.bytes = 4096
        log.index.size.max.bytes = 10485760
        log.message.downconversion.enable = true
        log.message.format.version = 2.5
        log.message.timestamp.difference.max.ms = 9223372036854775807
        log.message.timestamp.type = CreateTime
        log.preallocate = false
        log.retention.bytes = -1
        log.retention.check.interval.ms = 300000
        log.retention.hours = 168
        log.retention.minutes = null
        log.retention.ms = null
        log.roll.hours = 168
        log.roll.jitter.hours = 0
        log.roll.jitter.ms = null
        log.roll.ms = null
        log.segment.bytes = 1073741824
        log.segment.delete.delay.ms = 60000
        max.connections = 2147483647
        max.connections.per.ip = 2147483647
        max.connections.per.ip.overrides =
        max.incremental.fetch.session.cache.slots = 1000
        message.max.bytes = 1048588
        metric.reporters = []
        metrics.num.samples = 2
        metrics.recording.level = INFO
        metrics.sample.window.ms = 30000
        min.insync.replicas = 1
        num.io.threads = 8
        num.network.threads = 3
        num.partitions = 1
        num.recovery.threads.per.data.dir = 1
        num.replica.alter.log.dirs.threads = null
        num.replica.fetchers = 1
        offset.metadata.max.bytes = 4096
        offsets.commit.required.acks = -1
        offsets.commit.timeout.ms = 5000
        offsets.load.buffer.size = 5242880
        offsets.retention.check.interval.ms = 600000
        offsets.retention.minutes = 10080
        offsets.topic.compression.codec = 0
        offsets.topic.num.partitions = 50
        offsets.topic.replication.factor = 1
        offsets.topic.segment.bytes = 104857600
        password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
        password.encoder.iterations = 4096
        password.encoder.key.length = 128
        password.encoder.keyfactory.algorithm = null
        password.encoder.old.secret = null
        password.encoder.secret = null
        port = 9092
        principal.builder.class = null
        producer.purgatory.purge.interval.requests = 1000
        queued.max.request.bytes = -1
        queued.max.requests = 500
        quota.consumer.default = 9223372036854775807
        quota.producer.default = 9223372036854775807
        quota.window.num = 11
        quota.window.size.seconds = 1
        replica.fetch.backoff.ms = 1000
        replica.fetch.max.bytes = 1048576
        replica.fetch.min.bytes = 1
        replica.fetch.response.max.bytes = 10485760
        replica.fetch.wait.max.ms = 500
        replica.high.watermark.checkpoint.interval.ms = 5000
        replica.lag.time.max.ms = 30000
        replica.selector.class = null
        replica.socket.receive.buffer.bytes = 65536
        replica.socket.timeout.ms = 30000
        replication.quota.window.num = 11
        replication.quota.window.size.seconds = 1
        request.timeout.ms = 30000
        reserved.broker.max.id = 1000
        sasl.client.callback.handler.class = null
        sasl.enabled.mechanisms = []
        sasl.jaas.config = null
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.principal.to.local.rules = [DEFAULT]
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.login.callback.handler.class = null
        sasl.login.class = null
        sasl.login.refresh.buffer.seconds = 300
        sasl.login.refresh.min.period.seconds = 60
        sasl.login.refresh.window.factor = 0.8
        sasl.login.refresh.window.jitter = 0.05
        sasl.mechanism.inter.broker.protocol = GSSAPI
        sasl.server.callback.handler.class = null
        security.inter.broker.protocol = PLAINTEXT
        security.providers = null
        socket.receive.buffer.bytes = 102400
        socket.request.max.bytes = 104857600
        socket.send.buffer.bytes = 102400
        ssl.cipher.suites = []
        ssl.client.auth = none
        ssl.enabled.protocols = [TLSv1.2]
        ssl.endpoint.identification.algorithm = HTTPS
        ssl.key.password = null
        ssl.keymanager.algorithm = SunX509
        ssl.keystore.location = null
        ssl.keystore.password = null
        ssl.keystore.type = JKS
        ssl.principal.mapping.rules = DEFAULT
        ssl.protocol = TLSv1.2
        ssl.provider = null
        ssl.secure.random.implementation = SHA1PRNG
        ssl.trustmanager.algorithm = PKIX
        ssl.truststore.location = null
        ssl.truststore.password = null
        ssl.truststore.type = JKS
        transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
        transaction.max.timeout.ms = 900000
        transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
        transaction.state.log.load.buffer.size = 5242880
        transaction.state.log.min.isr = 1
        transaction.state.log.num.partitions = 50
        transaction.state.log.replication.factor = 1
        transaction.state.log.segment.bytes = 104857600
        transactional.id.expiration.ms = 604800000
        unclean.leader.election.enable = false
        zookeeper.clientCnxnSocket = null
        zookeeper.connect = localhost:2181
        zookeeper.connection.timeout.ms = null
        zookeeper.max.in.flight.requests = 10
        zookeeper.session.timeout.ms = 18000
        zookeeper.set.acl = false
        zookeeper.ssl.cipher.suites = null
        zookeeper.ssl.client.enable = false
        zookeeper.ssl.crl.enable = false
        zookeeper.ssl.enabled.protocols = null
        zookeeper.ssl.endpoint.identification.algorithm = HTTPS
        zookeeper.ssl.keystore.location = null
        zookeeper.ssl.keystore.password = null
        zookeeper.ssl.keystore.type = null
        zookeeper.ssl.ocsp.enable = false
        zookeeper.ssl.protocol = TLSv1.2
        zookeeper.ssl.truststore.location = null
        zookeeper.ssl.truststore.password = null
        zookeeper.ssl.truststore.type = null
        zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig) [main]
2020-10-15 09:48:42,116 INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [ThrottledChannelReaper-Fetch]
2020-10-15 09:48:42,118 INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [ThrottledChannelReaper-Produce]
2020-10-15 09:48:42,121 INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [ThrottledChannelReaper-Request]
2020-10-15 09:48:42,214 INFO Log directory /var/lib/kafka/data/kafka-log0 not found, creating it. (kafka.log.LogManager) [main]
2020-10-15 09:48:42,254 INFO Loading logs. (kafka.log.LogManager) [main]
2020-10-15 09:48:42,273 INFO Logs loading complete in 19 ms. (kafka.log.LogManager) [main]
2020-10-15 09:48:42,305 INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager) [main]
2020-10-15 09:48:42,310 INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager) [main]
2020-10-15 09:48:42,326 INFO Starting the log cleaner (kafka.log.LogCleaner) [main]
2020-10-15 09:48:42,498 INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner) [kafka-log-cleaner-thread-0]
2020-10-15 09:48:43,439 INFO Awaiting socket connections on 0.0.0.0:9091. (kafka.network.Acceptor) [main]
2020-10-15 09:48:46,464 INFO [SocketServer brokerId=0] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9091,ListenerName(REPLICATION-9091),SSL) (kafka.network.SocketServer) [main]
2020-10-15 09:48:46,470 INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor) [main]
2020-10-15 09:48:46,529 INFO [SocketServer brokerId=0] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9092,ListenerName(PLAIN-9092),PLAINTEXT) (kafka.network.SocketServer) [main]
2020-10-15 09:48:46,530 INFO Awaiting socket connections on 0.0.0.0:9093. (kafka.network.Acceptor) [main]
2020-10-15 09:48:47,260 INFO [SocketServer brokerId=0] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9093,ListenerName(TLS-9093),SSL) (kafka.network.SocketServer) [main]
2020-10-15 09:48:47,261 INFO Awaiting socket connections on 0.0.0.0:9094. (kafka.network.Acceptor) [main]
2020-10-15 09:48:47,393 INFO Successfully logged in. (org.apache.kafka.common.security.authenticator.AbstractLogin) [main]
2020-10-15 09:48:47,490 INFO [SocketServer brokerId=0] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9094,ListenerName(EXTERNAL-9094),SASL_SSL) (kafka.network.SocketServer) [main]
2020-10-15 09:48:47,493 INFO [SocketServer brokerId=0] Started 4 acceptor threads for data-plane (kafka.network.SocketServer) [main]
2020-10-15 09:48:47,560 INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [ExpirationReaper-0-Fetch]
2020-10-15 09:48:47,585 INFO [ExpirationReaper-0-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [ExpirationReaper-0-ElectLeader]
2020-10-15 09:48:47,580 INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [ExpirationReaper-0-DeleteRecords]
2020-10-15 09:48:47,586 INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [ExpirationReaper-0-Produce]
2020-10-15 09:48:47,666 INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler) [LogDirFailureHandler]
2020-10-15 09:48:47,793 INFO Creating /brokers/ids/0 (is it secure? false) (kafka.zk.KafkaZkClient) [main]
2020-10-15 09:48:47,833 INFO Stat of the created znode at /brokers/ids/0 is: 4294967720,4294967720,1602755327814,1602755327814,1,0,0,144115258932723725,604,0,4294967720
 (kafka.zk.KafkaZkClient) [main]
2020-10-15 09:48:47,835 INFO Registered broker 0 at path /brokers/ids/0 with addresses: ArrayBuffer(EndPoint(prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc,9091,ListenerName(REPLICATION-9091),SSL), EndPoint(prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc,9092,ListenerName(PLAIN-9092),PLAINTEXT), EndPoint(prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc,9093,ListenerName(TLS-9093),SSL), EndPoint(prod-cluster-broker-0-streaming.10.128.0.24.nip.io,443,ListenerName(EXTERNAL-9094),SASL_SSL)), czxid (broker epoch): 4294967720 (kafka.zk.KafkaZkClient) [main]
2020-10-15 09:48:48,054 INFO [ControllerEventThread controllerId=0] Starting (kafka.controller.ControllerEventManager$ControllerEventThread) [controller-event-thread]
2020-10-15 09:48:48,081 INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [ExpirationReaper-0-topic]
2020-10-15 09:48:48,088 INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [ExpirationReaper-0-Heartbeat]
2020-10-15 09:48:48,096 INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [ExpirationReaper-0-Rebalance]
2020-10-15 09:48:48,181 INFO [Controller id=0] 0 successfully elected as the controller. Epoch incremented to 3 and epoch zk version is now 3 (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,182 INFO [Controller id=0] Registering handlers (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,203 INFO [Controller id=0] Deleting log dir event notifications (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,218 INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator) [main]
2020-10-15 09:48:48,224 INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator) [main]
2020-10-15 09:48:48,225 INFO [Controller id=0] Deleting isr change notifications (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,245 INFO [Controller id=0] Initializing controller context (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,252 INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 29 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [group-metadata-manager-0]
2020-10-15 09:48:48,296 INFO [ProducerId Manager 0]: Acquired new producerId block (brokerId:0,blockStartProducerId:2000,blockEndProducerId:2999) by writing to Zk with path version 3 (kafka.coordinator.transaction.ProducerIdManager) [main]
2020-10-15 09:48:48,322 INFO [Controller id=0] Initialized broker epochs cache: Map(0 -> 4294967720) (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,365 DEBUG [Controller id=0] Register BrokerModifications handler for Set(0) (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,466 DEBUG [Channel manager on controller 0]: Controller 0 trying to connect to broker 0 (kafka.controller.ControllerChannelManager) [controller-event-thread]
2020-10-15 09:48:48,638 INFO [TransactionCoordinator id=0] Starting up. (kafka.coordinator.transaction.TransactionCoordinator) [main]
2020-10-15 09:48:48,653 INFO [TransactionCoordinator id=0] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator) [main]
2020-10-15 09:48:48,672 INFO [Transaction Marker Channel Manager 0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager) [TxnMarkerSenderThread-0]
2020-10-15 09:48:48,742 INFO [Controller id=0] Currently active brokers in the cluster: Set(0) (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,743 INFO [Controller id=0] Currently shutting brokers in the cluster: Set() (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,744 INFO [Controller id=0] Current list of topics in the cluster: Set(shipments, sales, users) (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,744 INFO [Controller id=0] Fetching topic deletions in progress (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,746 INFO [ZooKeeperClient ACL authorizer] Initializing a new session to localhost:2181. (kafka.zookeeper.ZooKeeperClient) [main]
2020-10-15 09:48:48,746 INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@15f8701f (org.apache.zookeeper.ZooKeeper) [main]
2020-10-15 09:48:48,747 INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket) [main]
2020-10-15 09:48:48,747 INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn) [main]
2020-10-15 09:48:48,767 INFO [Controller id=0] List of topics to be deleted:  (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,767 INFO [Controller id=0] List of topics ineligible for deletion:  (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,768 INFO [Controller id=0] Initializing topic deletion manager (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,769 INFO [Topic Deletion Manager 0] Initializing manager with initial deletions: Set(), initial ineligible deletions: Set() (kafka.controller.TopicDeletionManager) [controller-event-thread]
2020-10-15 09:48:48,770 INFO [Controller id=0] Sending update metadata request (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:48,772 INFO [ZooKeeperClient ACL authorizer] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) [main]
2020-10-15 09:48:48,774 INFO [RequestSendThread controllerId=0] Starting (kafka.controller.RequestSendThread) [Controller-0-to-broker-0-send-thread]
2020-10-15 09:48:48,774 INFO Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) [main-SendThread(localhost:2181)]
2020-10-15 09:48:48,774 INFO Socket connection established, initiating session, client: /127.0.0.1:43608, server: localhost/127.0.0.1:2181 (org.apache.zookeeper.ClientCnxn) [main-SendThread(localhost:2181)]
2020-10-15 09:48:48,886 INFO Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x20000107f66000e, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn) [main-SendThread(localhost:2181)]
2020-10-15 09:48:48,887 INFO [ZooKeeperClient ACL authorizer] Connected. (kafka.zookeeper.ZooKeeperClient) [main]
2020-10-15 09:48:48,916 INFO [ReplicaStateMachine controllerId=0] Initializing replica state (kafka.controller.ZkReplicaStateMachine) [controller-event-thread]
2020-10-15 09:48:49,005 INFO [ReplicaStateMachine controllerId=0] Triggering online replica state changes (kafka.controller.ZkReplicaStateMachine) [controller-event-thread]
2020-10-15 09:48:49,066 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-8 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,068 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-1 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,068 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-7 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,069 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-0 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,070 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-6 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,071 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-6 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,072 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-0 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,077 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-8 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,081 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-6 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,082 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-4 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,082 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-5 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,083 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-3 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,092 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-8 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,092 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-7 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,093 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-9 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,099 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-3 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,100 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-4 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,101 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-3 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,115 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-7 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,116 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-4 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,117 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-5 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,117 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-0 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,117 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-1 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,118 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-2 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,121 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-1 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,121 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-9 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,122 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-5 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,124 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition shipments-2 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,126 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition sales-9 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,126 TRACE [Controller id=0 epoch=3] Changed state of replica 0 for partition users-2 from OnlineReplica to OnlineReplica (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,130 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=3, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-3 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,130 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=8, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-8 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,130 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=7, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-7 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=0, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-0 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=7, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-7 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=5, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-5 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=1, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-1 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=4, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-4 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=9, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-9 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=4, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-4 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=2, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-2 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=1, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-1 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=9, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-9 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=6, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-6 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=6, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-6 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,131 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=3, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-3 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,132 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=3, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-3 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,132 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=7, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-7 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,132 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=8, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-8 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,132 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=0, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-0 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,132 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=1, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-1 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,138 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=0, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-0 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,138 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=4, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-4 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,138 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=8, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-8 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,138 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=5, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-5 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,138 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=9, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-9 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,138 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=2, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-2 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,139 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='sales', partitionIndex=5, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition sales-5 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,139 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='users', partitionIndex=2, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition users-2 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,139 TRACE [Controller id=0 epoch=3] Sending become-leader LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=6, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) to broker 0 for partition shipments-6 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,157 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=3, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-3 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,158 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=8, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-8 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,158 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=7, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-7 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,158 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=0, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-0 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,158 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=7, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-7 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,158 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=5, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-5 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,159 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=1, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-1 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,159 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=4, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-4 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,159 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=9, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-9 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,159 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=4, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-4 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,159 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=2, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-2 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,160 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=1, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-1 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,161 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=9, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-9 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,161 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=6, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-6 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,161 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=6, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-6 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,161 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=3, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-3 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,161 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=3, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-3 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,161 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=7, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-7 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,164 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=8, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-8 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,165 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=0, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-0 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,165 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=1, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-1 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,165 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=0, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-0 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,166 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=4, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-4 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,166 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=8, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-8 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,166 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=5, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-5 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,167 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=9, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-9 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,167 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=2, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-2 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,167 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='sales', partitionIndex=5, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition sales-5 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,167 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='users', partitionIndex=2, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition users-2 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,167 TRACE [Controller id=0 epoch=3] Sending UpdateMetadata request UpdateMetadataPartitionState(topicName='shipments', partitionIndex=6, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) to brokers Set(0) for partition shipments-6 (state.change.logger) [controller-event-thread]
2020-10-15 09:48:49,169 INFO [ReplicaStateMachine controllerId=0] Triggering offline replica state changes (kafka.controller.ZkReplicaStateMachine) [controller-event-thread]
2020-10-15 09:48:49,170 DEBUG [ReplicaStateMachine controllerId=0] Started replica state machine with initial state -> Map([Topic=shipments,Partition=3,Replica=0] -> OnlineReplica, [Topic=shipments,Partition=1,Replica=0] -> OnlineReplica, [Topic=shipments,Partition=8,Replica=0] -> OnlineReplica, [Topic=sales,Partition=5,Replica=0] -> OnlineReplica, [Topic=sales,Partition=1,Replica=0] -> OnlineReplica, [Topic=users,Partition=3,Replica=0] -> OnlineReplica, [Topic=users,Partition=9,Replica=0] -> OnlineReplica, [Topic=users,Partition=4,Replica=0] -> OnlineReplica, [Topic=users,Partition=2,Replica=0] -> OnlineReplica, [Topic=users,Partition=6,Replica=0] -> OnlineReplica, [Topic=users,Partition=0,Replica=0] -> OnlineReplica, [Topic=shipments,Partition=6,Replica=0] -> OnlineReplica, [Topic=sales,Partition=9,Replica=0] -> OnlineReplica, [Topic=shipments,Partition=0,Replica=0] -> OnlineReplica, [Topic=sales,Partition=2,Replica=0] -> OnlineReplica, [Topic=sales,Partition=0,Replica=0] -> OnlineReplica, [Topic=shipments,Partition=2,Replica=0] -> OnlineReplica, [Topic=users,Partition=7,Replica=0] -> OnlineReplica, [Topic=users,Partition=5,Replica=0] -> OnlineReplica, [Topic=shipments,Partition=5,Replica=0] -> OnlineReplica, [Topic=shipments,Partition=7,Replica=0] -> OnlineReplica, [Topic=shipments,Partition=9,Replica=0] -> OnlineReplica, [Topic=sales,Partition=7,Replica=0] -> OnlineReplica, [Topic=sales,Partition=6,Replica=0] -> OnlineReplica, [Topic=sales,Partition=8,Replica=0] -> OnlineReplica, [Topic=shipments,Partition=4,Replica=0] -> OnlineReplica, [Topic=users,Partition=1,Replica=0] -> OnlineReplica, [Topic=users,Partition=8,Replica=0] -> OnlineReplica, [Topic=sales,Partition=3,Replica=0] -> OnlineReplica, [Topic=sales,Partition=4,Replica=0] -> OnlineReplica) (kafka.controller.ZkReplicaStateMachine) [controller-event-thread]
2020-10-15 09:48:49,176 INFO [PartitionStateMachine controllerId=0] Initializing partition state (kafka.controller.ZkPartitionStateMachine) [controller-event-thread]
2020-10-15 09:48:49,200 INFO [PartitionStateMachine controllerId=0] Triggering online partition state changes (kafka.controller.ZkPartitionStateMachine) [controller-event-thread]
2020-10-15 09:48:49,207 DEBUG [PartitionStateMachine controllerId=0] Started partition state machine with initial state -> Map(shipments-3 -> OnlinePartition, shipments-8 -> OnlinePartition, sales-7 -> OnlinePartition, shipments-0 -> OnlinePartition, users-7 -> OnlinePartition, shipments-5 -> OnlinePartition, users-1 -> OnlinePartition, sales-4 -> OnlinePartition, sales-9 -> OnlinePartition, users-4 -> OnlinePartition, shipments-2 -> OnlinePartition, sales-1 -> OnlinePartition, users-9 -> OnlinePartition, sales-6 -> OnlinePartition, users-6 -> OnlinePartition, sales-3 -> OnlinePartition, users-3 -> OnlinePartition, shipments-7 -> OnlinePartition, users-8 -> OnlinePartition, sales-0 -> OnlinePartition, shipments-1 -> OnlinePartition, users-0 -> OnlinePartition, shipments-4 -> OnlinePartition, sales-8 -> OnlinePartition, shipments-9 -> OnlinePartition, users-5 -> OnlinePartition, sales-2 -> OnlinePartition, sales-5 -> OnlinePartition, users-2 -> OnlinePartition, shipments-6 -> OnlinePartition) (kafka.controller.ZkPartitionStateMachine) [controller-event-thread]
2020-10-15 09:48:49,208 INFO [Controller id=0] Ready to serve as the new controller with epoch 3 (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:49,263 INFO [Controller id=0] Partitions undergoing preferred replica election:  (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:49,264 INFO [Controller id=0] Partitions that completed preferred replica election:  (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:49,266 INFO [Controller id=0] Skipping preferred replica election for partitions due to topic deletion:  (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:49,267 INFO [Controller id=0] Resuming preferred replica election for partitions:  (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:49,270 INFO [Controller id=0] Starting replica leader election (PREFERRED) for partitions  triggered by ZkTriggered (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:49,278 INFO [/kafka-acl-changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) [/kafka-acl-changes-event-process-thread]
2020-10-15 09:48:49,284 INFO [/kafka-acl-extended-changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) [/kafka-acl-extended-changes-event-process-thread]
2020-10-15 09:48:49,319 INFO Processing notification(s) to /kafka-acl-changes (kafka.common.ZkNodeChangeNotificationListener) [/kafka-acl-changes-event-process-thread]
2020-10-15 09:48:49,324 INFO [Controller id=0] Starting the controller scheduler (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:49,581 INFO [ExpirationReaper-0-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [ExpirationReaper-0-AlterAcls]
2020-10-15 09:48:49,637 INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) [/config/changes-event-process-thread]
2020-10-15 09:48:49,651 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:48:49,666 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,681 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,690 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,691 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,715 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,715 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,716 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,716 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,731 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [main]
2020-10-15 09:48:49,732 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [main]
2020-10-15 09:48:49,732 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [main]
2020-10-15 09:48:49,734 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [main]
2020-10-15 09:48:49,736 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [main]
2020-10-15 09:48:49,736 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [main]
2020-10-15 09:48:49,753 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,753 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,754 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,754 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,774 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,774 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,775 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,775 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,811 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,812 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,812 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,812 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,838 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,838 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,838 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,838 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,848 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,848 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,849 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,849 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,857 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,858 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,858 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,858 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,870 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,870 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,870 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,870 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,900 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,901 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,901 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,901 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,913 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,913 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,913 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,913 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,914 INFO [SocketServer brokerId=0] Started data-plane processors for 4 acceptors (kafka.network.SocketServer) [main]
2020-10-15 09:48:49,974 INFO Kafka version: 2.5.0 (org.apache.kafka.common.utils.AppInfoParser) [main]
2020-10-15 09:48:49,975 INFO Kafka commitId: 66563e712b0b9f84 (org.apache.kafka.common.utils.AppInfoParser) [main]
2020-10-15 09:48:49,975 INFO Kafka startTimeMs: 1602755329920 (org.apache.kafka.common.utils.AppInfoParser) [main]
2020-10-15 09:48:49,977 INFO [KafkaServer id=0] started (kafka.server.KafkaServer) [main]
2020-10-15 09:48:49,991 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,992 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,992 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:49,995 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,047 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,047 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,047 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,048 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,097 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,097 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,098 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,102 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,137 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,140 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,141 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,141 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,163 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,164 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,164 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,164 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:48:50,304 INFO [RequestSendThread controllerId=0] Controller 0 connected to prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9091 (id: 0 rack: null) for sending state change requests (kafka.controller.RequestSendThread) [Controller-0-to-broker-0-send-thread]
2020-10-15 09:48:50,545 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:48:50,764 INFO Running as server according to kafka.server:type=KafkaServer,name=BrokerState => ready (io.strimzi.kafka.agent.KafkaAgent) [KafkaAgentPoller]
2020-10-15 09:48:50,957 TRACE [Controller id=0 epoch=3] Received response {error_code=0,_tagged_fields={}} for request UPDATE_METADATA with correlation id 0 sent to broker prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9091 (id: 0 rack: null) (state.change.logger) [Controller-0-to-broker-0-send-thread]
2020-10-15 09:48:51,091 TRACE [Broker id=0] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='shipments', partitionIndex=3, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 0 epoch 3 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,412 TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 3 starting the become-leader transition for partition users-4 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,412 TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 3 starting the become-leader transition for partition shipments-3 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,412 TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 3 starting the become-leader transition for partition users-1 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,421 INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(shipments-9, users-6, sales-1, sales-9, users-3, sales-2, users-1, users-7, shipments-6, users-2, sales-3, sales-0, sales-8, shipments-4, sales-5, sales-7, sales-4, sales-6, shipments-7, shipments-3, shipments-0, users-4, shipments-5, users-5, shipments-1, users-8, users-9, shipments-2, users-0, shipments-8) (kafka.server.ReplicaFetcherManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,730 INFO [Log partition=shipments-0, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,801 INFO [Log partition=shipments-0, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 270 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,824 INFO Created log for partition shipments-0 in /var/lib/kafka/data/kafka-log0/shipments-0 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,839 INFO [Partition shipments-0 broker=0] No checkpointed highwatermark is found for partition shipments-0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,846 INFO [Partition shipments-0 broker=0] Log loaded for partition shipments-0 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,861 INFO [Partition shipments-0 broker=0] shipments-0 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:51,984 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-0 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,026 INFO [Log partition=sales-8, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,029 INFO [Log partition=sales-8, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,037 INFO Created log for partition sales-8 in /var/lib/kafka/data/kafka-log0/sales-8 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,038 INFO [Partition sales-8 broker=0] No checkpointed highwatermark is found for partition sales-8 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,038 INFO [Partition sales-8 broker=0] Log loaded for partition sales-8 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,039 INFO [Partition sales-8 broker=0] sales-8 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,047 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-8 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,085 INFO [Log partition=users-8, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,090 INFO [Log partition=users-8, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 26 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,094 INFO Created log for partition users-8 in /var/lib/kafka/data/kafka-log0/users-8 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,094 INFO [Partition users-8 broker=0] No checkpointed highwatermark is found for partition users-8 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,094 INFO [Partition users-8 broker=0] Log loaded for partition users-8 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,095 INFO [Partition users-8 broker=0] users-8 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,104 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-8 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,125 INFO [Log partition=sales-5, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,126 INFO [Log partition=sales-5, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 9 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,130 INFO Created log for partition sales-5 in /var/lib/kafka/data/kafka-log0/sales-5 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,130 INFO [Partition sales-5 broker=0] No checkpointed highwatermark is found for partition sales-5 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,131 INFO [Partition sales-5 broker=0] Log loaded for partition sales-5 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,131 INFO [Partition sales-5 broker=0] sales-5 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,138 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-5 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,154 INFO [Log partition=sales-2, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,165 INFO [Log partition=sales-2, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,171 INFO Created log for partition sales-2 in /var/lib/kafka/data/kafka-log0/sales-2 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,172 INFO [Partition sales-2 broker=0] No checkpointed highwatermark is found for partition sales-2 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,172 INFO [Partition sales-2 broker=0] Log loaded for partition sales-2 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,172 INFO [Partition sales-2 broker=0] sales-2 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,193 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-2 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,214 INFO [Log partition=shipments-7, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,222 INFO [Log partition=shipments-7, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,228 INFO Created log for partition shipments-7 in /var/lib/kafka/data/kafka-log0/shipments-7 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,228 INFO [Partition shipments-7 broker=0] No checkpointed highwatermark is found for partition shipments-7 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,228 INFO [Partition shipments-7 broker=0] Log loaded for partition shipments-7 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,237 INFO [Partition shipments-7 broker=0] shipments-7 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,258 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-7 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,304 INFO [Log partition=users-5, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,305 INFO [Log partition=users-5, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,329 INFO Created log for partition users-5 in /var/lib/kafka/data/kafka-log0/users-5 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,331 INFO [Partition users-5 broker=0] No checkpointed highwatermark is found for partition users-5 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,331 INFO [Partition users-5 broker=0] Log loaded for partition users-5 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,331 INFO [Partition users-5 broker=0] users-5 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,353 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-5 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,401 INFO [Log partition=users-2, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,402 INFO [Log partition=users-2, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 7 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,404 INFO Created log for partition users-2 in /var/lib/kafka/data/kafka-log0/users-2 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,405 INFO [Partition users-2 broker=0] No checkpointed highwatermark is found for partition users-2 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,405 INFO [Partition users-2 broker=0] Log loaded for partition users-2 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,414 INFO [Partition users-2 broker=0] users-2 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,420 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-2 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,432 INFO [Log partition=shipments-4, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,433 INFO [Log partition=shipments-4, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 5 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,438 INFO Created log for partition shipments-4 in /var/lib/kafka/data/kafka-log0/shipments-4 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,440 INFO [Partition shipments-4 broker=0] No checkpointed highwatermark is found for partition shipments-4 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,440 INFO [Partition shipments-4 broker=0] Log loaded for partition shipments-4 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,440 INFO [Partition shipments-4 broker=0] shipments-4 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,450 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-4 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,464 INFO [Log partition=users-9, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,465 INFO [Log partition=users-9, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 7 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,467 INFO Created log for partition users-9 in /var/lib/kafka/data/kafka-log0/users-9 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,467 INFO [Partition users-9 broker=0] No checkpointed highwatermark is found for partition users-9 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,470 INFO [Partition users-9 broker=0] Log loaded for partition users-9 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,470 INFO [Partition users-9 broker=0] users-9 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,479 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-9 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,489 INFO [Log partition=shipments-1, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,490 INFO [Log partition=shipments-1, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,497 INFO Created log for partition shipments-1 in /var/lib/kafka/data/kafka-log0/shipments-1 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,498 INFO [Partition shipments-1 broker=0] No checkpointed highwatermark is found for partition shipments-1 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,498 INFO [Partition shipments-1 broker=0] Log loaded for partition shipments-1 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,498 INFO [Partition shipments-1 broker=0] shipments-1 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,503 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-1 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,517 INFO [Log partition=users-6, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,518 INFO [Log partition=users-6, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,520 INFO Created log for partition users-6 in /var/lib/kafka/data/kafka-log0/users-6 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,520 INFO [Partition users-6 broker=0] No checkpointed highwatermark is found for partition users-6 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,520 INFO [Partition users-6 broker=0] Log loaded for partition users-6 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,520 INFO [Partition users-6 broker=0] users-6 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,528 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-6 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,551 INFO [Log partition=users-3, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,552 INFO [Log partition=users-3, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 7 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,554 INFO Created log for partition users-3 in /var/lib/kafka/data/kafka-log0/users-3 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,555 INFO [Partition users-3 broker=0] No checkpointed highwatermark is found for partition users-3 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,555 INFO [Partition users-3 broker=0] Log loaded for partition users-3 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,555 INFO [Partition users-3 broker=0] users-3 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,568 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-3 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,582 INFO [Log partition=sales-9, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,583 INFO [Log partition=sales-9, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 7 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,587 INFO Created log for partition sales-9 in /var/lib/kafka/data/kafka-log0/sales-9 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,587 INFO [Partition sales-9 broker=0] No checkpointed highwatermark is found for partition sales-9 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,587 INFO [Partition sales-9 broker=0] Log loaded for partition sales-9 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,588 INFO [Partition sales-9 broker=0] sales-9 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,593 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-9 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,604 INFO [Log partition=sales-6, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,606 INFO [Log partition=sales-6, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,608 INFO Created log for partition sales-6 in /var/lib/kafka/data/kafka-log0/sales-6 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,608 INFO [Partition sales-6 broker=0] No checkpointed highwatermark is found for partition sales-6 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,609 INFO [Partition sales-6 broker=0] Log loaded for partition sales-6 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,609 INFO [Partition sales-6 broker=0] sales-6 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,621 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-6 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,641 INFO [Log partition=users-0, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,643 INFO [Log partition=users-0, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 11 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,645 INFO Created log for partition users-0 in /var/lib/kafka/data/kafka-log0/users-0 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,645 INFO [Partition users-0 broker=0] No checkpointed highwatermark is found for partition users-0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,645 INFO [Partition users-0 broker=0] Log loaded for partition users-0 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,645 INFO [Partition users-0 broker=0] users-0 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,655 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-0 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,669 INFO [Log partition=sales-3, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,670 INFO [Log partition=sales-3, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 5 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,672 INFO Created log for partition sales-3 in /var/lib/kafka/data/kafka-log0/sales-3 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,673 INFO [Partition sales-3 broker=0] No checkpointed highwatermark is found for partition sales-3 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,673 INFO [Partition sales-3 broker=0] Log loaded for partition sales-3 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,673 INFO [Partition sales-3 broker=0] sales-3 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,684 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-3 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,692 INFO [Log partition=shipments-8, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,696 INFO [Log partition=shipments-8, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 7 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,701 INFO Created log for partition shipments-8 in /var/lib/kafka/data/kafka-log0/shipments-8 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,701 INFO [Partition shipments-8 broker=0] No checkpointed highwatermark is found for partition shipments-8 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,702 INFO [Partition shipments-8 broker=0] Log loaded for partition shipments-8 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,702 INFO [Partition shipments-8 broker=0] shipments-8 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,710 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-8 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,719 INFO [Log partition=sales-0, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,719 INFO [Log partition=sales-0, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,724 INFO Created log for partition sales-0 in /var/lib/kafka/data/kafka-log0/sales-0 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,725 INFO [Partition sales-0 broker=0] No checkpointed highwatermark is found for partition sales-0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,725 INFO [Partition sales-0 broker=0] Log loaded for partition sales-0 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,725 INFO [Partition sales-0 broker=0] sales-0 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,732 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-0 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,745 INFO [Log partition=shipments-5, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,748 INFO [Log partition=shipments-5, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 7 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,775 INFO Created log for partition shipments-5 in /var/lib/kafka/data/kafka-log0/shipments-5 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,775 INFO [Partition shipments-5 broker=0] No checkpointed highwatermark is found for partition shipments-5 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,775 INFO [Partition shipments-5 broker=0] Log loaded for partition shipments-5 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,776 INFO [Partition shipments-5 broker=0] shipments-5 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,784 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-5 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,791 INFO [Log partition=shipments-2, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,792 INFO [Log partition=shipments-2, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 3 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,793 INFO Created log for partition shipments-2 in /var/lib/kafka/data/kafka-log0/shipments-2 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,794 INFO [Partition shipments-2 broker=0] No checkpointed highwatermark is found for partition shipments-2 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,794 INFO [Partition shipments-2 broker=0] Log loaded for partition shipments-2 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,794 INFO [Partition shipments-2 broker=0] shipments-2 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,800 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-2 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,809 INFO [Log partition=sales-7, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,810 INFO [Log partition=sales-7, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,812 INFO Created log for partition sales-7 in /var/lib/kafka/data/kafka-log0/sales-7 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,812 INFO [Partition sales-7 broker=0] No checkpointed highwatermark is found for partition sales-7 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,812 INFO [Partition sales-7 broker=0] Log loaded for partition sales-7 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,812 INFO [Partition sales-7 broker=0] sales-7 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,820 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-7 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,832 INFO [Log partition=users-7, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,834 INFO [Log partition=users-7, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 7 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,844 INFO Created log for partition users-7 in /var/lib/kafka/data/kafka-log0/users-7 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,845 INFO [Partition users-7 broker=0] No checkpointed highwatermark is found for partition users-7 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,846 INFO [Partition users-7 broker=0] Log loaded for partition users-7 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,846 INFO [Partition users-7 broker=0] users-7 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,852 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-7 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,864 INFO [Log partition=sales-4, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,865 INFO [Log partition=sales-4, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,866 INFO Created log for partition sales-4 in /var/lib/kafka/data/kafka-log0/sales-4 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,866 INFO [Partition sales-4 broker=0] No checkpointed highwatermark is found for partition sales-4 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,866 INFO [Partition sales-4 broker=0] Log loaded for partition sales-4 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,866 INFO [Partition sales-4 broker=0] sales-4 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,872 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-4 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,881 INFO [Log partition=shipments-9, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,882 INFO [Log partition=shipments-9, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,884 INFO Created log for partition shipments-9 in /var/lib/kafka/data/kafka-log0/shipments-9 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,884 INFO [Partition shipments-9 broker=0] No checkpointed highwatermark is found for partition shipments-9 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,884 INFO [Partition shipments-9 broker=0] Log loaded for partition shipments-9 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,884 INFO [Partition shipments-9 broker=0] shipments-9 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,890 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-9 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,918 INFO [Log partition=sales-1, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,920 INFO [Log partition=sales-1, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 22 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,926 INFO Created log for partition sales-1 in /var/lib/kafka/data/kafka-log0/sales-1 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,926 INFO [Partition sales-1 broker=0] No checkpointed highwatermark is found for partition sales-1 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,927 INFO [Partition sales-1 broker=0] Log loaded for partition sales-1 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,927 INFO [Partition sales-1 broker=0] sales-1 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,934 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition sales-1 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,944 INFO [Log partition=shipments-6, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,945 INFO [Log partition=shipments-6, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 5 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,948 INFO Created log for partition shipments-6 in /var/lib/kafka/data/kafka-log0/shipments-6 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,948 INFO [Partition shipments-6 broker=0] No checkpointed highwatermark is found for partition shipments-6 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,948 INFO [Partition shipments-6 broker=0] Log loaded for partition shipments-6 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,948 INFO [Partition shipments-6 broker=0] shipments-6 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,958 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-6 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,981 INFO [Log partition=users-4, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,982 INFO [Log partition=users-4, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,985 INFO Created log for partition users-4 in /var/lib/kafka/data/kafka-log0/users-4 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,985 INFO [Partition users-4 broker=0] No checkpointed highwatermark is found for partition users-4 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,985 INFO [Partition users-4 broker=0] Log loaded for partition users-4 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:52,990 INFO [Partition users-4 broker=0] users-4 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,010 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-4 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,032 INFO [Log partition=shipments-3, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,034 INFO [Log partition=shipments-3, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,039 INFO Created log for partition shipments-3 in /var/lib/kafka/data/kafka-log0/shipments-3 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,039 INFO [Partition shipments-3 broker=0] No checkpointed highwatermark is found for partition shipments-3 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,039 INFO [Partition shipments-3 broker=0] Log loaded for partition shipments-3 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,039 INFO [Partition shipments-3 broker=0] shipments-3 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,051 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition shipments-3 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,072 INFO [Log partition=users-1, dir=/var/lib/kafka/data/kafka-log0] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,073 INFO [Log partition=users-1, dir=/var/lib/kafka/data/kafka-log0] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 4 ms (kafka.log.Log) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,074 INFO Created log for partition users-1 in /var/lib/kafka/data/kafka-log0/users-1 with properties {compression.type -> producer, min.insync.replicas -> 1, message.downconversion.enable -> true, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, retention.ms -> 604800000, segment.bytes -> 1073741824, flush.messages -> 9223372036854775807, message.format.version -> 2.5-IV0, max.compaction.lag.ms -> 9223372036854775807, file.delete.delay.ms -> 60000, max.message.bytes -> 1048588, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, index.interval.bytes -> 4096, min.cleanable.dirty.ratio -> 0.5, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,075 INFO [Partition users-1 broker=0] No checkpointed highwatermark is found for partition users-1 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,075 INFO [Partition users-1 broker=0] Log loaded for partition users-1 with initial high watermark 0 (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,075 INFO [Partition users-1 broker=0] users-1 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (kafka.cluster.Partition) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,084 TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 0 epoch 3 with correlation id 1 for partition users-1 (last update controller epoch 2) (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,088 TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 3 for the become-leader transition for partition shipments-0 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,088 TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 3 for the become-leader transition for partition shipments-9 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,088 TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 3 for the become-leader transition for partition sales-1 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,088 TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 3 for the become-leader transition for partition shipments-6 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,088 TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 3 for the become-leader transition for partition users-4 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,088 TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 3 for the become-leader transition for partition shipments-3 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,088 TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 3 for the become-leader transition for partition users-1 (state.change.logger) [data-plane-kafka-request-handler-3]
2020-10-15 09:48:53,151 TRACE [Controller id=0 epoch=3] Received response {error_code=0,partition_errors=[{topic_name=shipments,partition_index=3,error_code=0,_tagged_fields={}},{topic_name=shipments,partition_index=8,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=7,error_code=0,_tagged_fields={}},{topic_name=shipments,partition_index=0,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=7,error_code=0,_tagged_fields={}},{topic_name=shipments,partition_index=5,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=1,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=4,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=9,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=4,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=1,error_code=0,_tagged_fields={}},{topic_name=shipments,partition_index=2,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=9,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=6,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=6,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=3,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=3,error_code=0,_tagged_fields={}},{topic_name=shipments,partition_index=7,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=8,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=0,error_code=0,_tagged_fields={}},{topic_name=shipments,partition_index=1,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=0,error_code=0,_tagged_fields={}},{topic_name=shipments,partition_index=4,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=8,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=5,error_code=0,_tagged_fields={}},{topic_name=shipments,partition_index=9,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=2,error_code=0,_tagged_fields={}},{topic_name=sales,partition_index=5,error_code=0,_tagged_fields={}},{topic_name=shipments,partition_index=6,error_code=0,_tagged_fields={}},{topic_name=users,partition_index=2,error_code=0,_tagged_fields={}}],_tagged_fields={}} for request LEADER_AND_ISR with correlation id 1 sent to broker prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9091 (id: 0 rack: null) (state.change.logger) [Controller-0-to-broker-0-send-thread]
2020-10-15 09:48:53,185 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='shipments', partitionIndex=3, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition shipments-3 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,187 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=7, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-7 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,187 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=4, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-4 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,187 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=9, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-9 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,187 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=1, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-1 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,187 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=6, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-6 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,187 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=3, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-3 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,187 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=0, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-0 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,187 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=8, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-8 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,188 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=2, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-2 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,190 TRACE [Broker id=0] Cached leader info UpdateMetadataPartitionState(topicName='sales', partitionIndex=5, controllerEpoch=2, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition sales-5 in response to UpdateMetadata request sent by controller 0 epoch 3 with correlation id 2 (state.change.logger) [data-plane-kafka-request-handler-7]
2020-10-15 09:48:53,193 TRACE [Controller id=0 epoch=3] Received response {error_code=0,_tagged_fields={}} for request UPDATE_METADATA with correlation id 2 sent to broker prod-cluster-kafka-0.prod-cluster-kafka-brokers.streaming.svc:9091 (id: 0 rack: null) (state.change.logger) [Controller-0-to-broker-0-send-thread]
2020-10-15 09:48:54,327 INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:54,330 TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:54,344 DEBUG [Controller id=0] Topics not in preferred replica for broker 0 Map() (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:54,347 TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:48:56,315 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:49:30,439 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:49:31,665 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:49:32,795 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:49:33,630 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:49:33,657 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:49:33,657 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:49:33,657 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:49:33,657 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:49:33,757 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:49:34,031 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:49:34,931 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:49:36,146 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:50:08,840 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:50:10,080 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:50:11,158 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:50:12,108 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:50:13,053 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.20 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:50:27,775 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:50:27,804 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:50:27,804 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:50:27,804 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:50:27,804 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:50:28,043 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:50:33,525 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:50:33,568 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:50:33,569 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:50:33,569 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:50:33,569 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:50:33,824 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:50:33,853 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:50:33,853 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:50:33,854 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:50:33,854 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:51:18,525 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:51:18,675 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:51:19,110 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:52:31,556 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:52:32,776 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:52:33,597 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:52:33,616 INFO Processing override for entityPath: users/admin-ex with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:52:33,616 INFO Removing PRODUCE quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:52:33,617 INFO Removing FETCH quota for user admin-ex (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:52:33,617 INFO Removing REQUEST quota for user admin-ex (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:52:33,773 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:52:33,845 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:52:33,926 INFO Processing notification(s) to /config/changes (kafka.common.ZkNodeChangeNotificationListener) [/config/changes-event-process-thread]
2020-10-15 09:52:33,956 INFO Processing override for entityPath: users/kafka-topic-admin-external with config: Map(SCRAM-SHA-512 -> [hidden]) (kafka.server.DynamicConfigManager) [/config/changes-event-process-thread]
2020-10-15 09:52:33,956 INFO Removing PRODUCE quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:52:33,957 INFO Removing FETCH quota for user kafka-topic-admin-external (kafka.server.ClientQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:52:33,957 INFO Removing REQUEST quota for user kafka-topic-admin-external (kafka.server.ClientRequestQuotaManager) [/config/changes-event-process-thread]
2020-10-15 09:53:16,907 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:17,923 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:18,892 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:20,132 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:21,314 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:22,133 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:23,349 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:24,168 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:25,036 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:26,263 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:27,510 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:28,596 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:29,781 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:30,711 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:31,650 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:32,478 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:33,509 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:34,745 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:35,674 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:36,830 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:37,926 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:38,958 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:39,885 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:40,808 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:41,980 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:42,910 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:43,940 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:45,054 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:46,284 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:47,264 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:48,399 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:49,363 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:50,287 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:51,210 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:52,242 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:53:53,182 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:53:54,347 INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:53:54,348 TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:53:54,349 DEBUG [Controller id=0] Topics not in preferred replica for broker 0 Map() (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:53:54,349 TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 (kafka.controller.KafkaController) [controller-event-thread]
2020-10-15 09:53:54,365 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:53:55,289 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:54:22,783 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]
2020-10-15 09:54:23,807 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-6]
2020-10-15 09:54:24,938 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-7]
2020-10-15 09:54:26,057 INFO [SocketServer brokerId=0] Failed authentication with /10.244.0.25 (SSL handshake failed) (org.apache.kafka.common.network.Selector) [data-plane-kafka-network-thread-0-ListenerName(TLS-9093)-SSL-8]

[root@k3s kafka]# oc logs -f pod/prod-cluster-kafka-0
error: a container name must be specified for pod prod-cluster-kafka-0, choose one of: [kafka tls-sidecar]
[root@k3s kafka]# oc logs -f pod/prod-cluster-kafka-0 tls-sidecar
Starting Stunnel with configuration:
pid = /usr/local/var/run/stunnel.pid
foreground = yes
debug = notice
sslVersion = TLSv1.2
[zookeeper-2181]
client = yes
CAfile = /tmp/cluster-ca.crt
cert = /etc/tls-sidecar/kafka-brokers/prod-cluster-kafka-0.crt
key = /etc/tls-sidecar/kafka-brokers/prod-cluster-kafka-0.key
accept = 127.0.0.1:2181
connect = prod-cluster-zookeeper-client:2181
delay = yes
verify = 2

2020.10.15 09:48:32 LOG5[19:140686888998976]: stunnel 4.56 on x86_64-redhat-linux-gnu platform
2020.10.15 09:48:32 LOG5[19:140686888998976]: Compiled/running with OpenSSL 1.0.1e-fips 11 Feb 2013
2020.10.15 09:48:32 LOG5[19:140686888998976]: Threading:PTHREAD Sockets:POLL,IPv6 SSL:ENGINE,OCSP,FIPS Auth:LIBWRAP
2020.10.15 09:48:32 LOG5[19:140686888998976]: Reading configuration from file /tmp/stunnel.conf
2020.10.15 09:48:32 LOG5[19:140686888998976]: FIPS mode is enabled
2020.10.15 09:48:32 LOG4[19:140686888998976]: Insecure file permissions on /etc/tls-sidecar/kafka-brokers/prod-cluster-kafka-0.key
2020.10.15 09:48:32 LOG5[19:140686888998976]: Configuration successful
2020.10.15 09:48:40 LOG5[19:140686888994560]: Service [zookeeper-2181] accepted connection from 127.0.0.1:43500
2020.10.15 09:48:40 LOG5[19:140686888994560]: connect_blocking: connected 10.96.9.195:2181
2020.10.15 09:48:40 LOG5[19:140686888994560]: Service [zookeeper-2181] connected remote server from 10.244.0.23:34572
2020.10.15 09:48:40 LOG5[19:140686888994560]: Certificate accepted: depth=1, /C=IN/ST=WB/L=Kolkata/O=CloudCafe/CN=cluster.local
2020.10.15 09:48:40 LOG5[19:140686888994560]: Certificate accepted: depth=0, /O=io.strimzi/CN=prod-cluster-zookeeper
2020.10.15 09:48:48 LOG5[19:140686888924928]: Service [zookeeper-2181] accepted connection from 127.0.0.1:43608
2020.10.15 09:48:48 LOG5[19:140686888924928]: connect_blocking: connected 10.96.9.195:2181
2020.10.15 09:48:48 LOG5[19:140686888924928]: Service [zookeeper-2181] connected remote server from 10.244.0.23:34680
2020.10.15 09:48:48 LOG5[19:140686888924928]: Certificate accepted: depth=1, /C=IN/ST=WB/L=Kolkata/O=CloudCafe/CN=cluster.local
2020.10.15 09:48:48 LOG5[19:140686888924928]: Certificate accepted: depth=0, /O=io.strimzi/CN=prod-cluster-zookeeper
scholzj commented 4 years ago

Sorry, but where exactly are you getting any errors? Also, the CA and certificates you are using here are just as self-signed as the Strimzi default certificates. If you want to use company certificates, you should use them from the beginning, because they will be generated differently and might not match what you are trying to test here.
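
As a starting point for the `SSL handshake failed` lines on the internal TLS listener (port 9093), it can help to compare the CA you loaded into the `prod-cluster-cluster-ca-cert` secret with the issuer of the broker certificate the operator generated. This is only a rough debugging sketch; it assumes the cluster runs in the `streaming` namespace (as the SANs in your config suggest) and relies on the standard Strimzi secret names (`<cluster>-cluster-ca-cert` and `<cluster>-kafka-brokers`):

```
# Subject of the cluster CA you created and labelled for Strimzi
kubectl get secret prod-cluster-cluster-ca-cert -n streaming \
  -o jsonpath='{.data.ca\.crt}' | base64 -d | openssl x509 -noout -subject -enddate

# Issuer and subject of the per-broker certificate behind the 9093 listener
kubectl get secret prod-cluster-kafka-brokers -n streaming \
  -o jsonpath='{.data.prod-cluster-kafka-0\.crt}' | base64 -d | openssl x509 -noout -issuer -subject
```

If the broker certificate's issuer does not match the CA subject, or the client pods at 10.244.0.20/10.244.0.25 still trust an older copy of the CA, the handshake will keep failing until the certificates and trust stores line up.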